/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

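/*
 * Allocate a mid (multiplex id) entry from the mempool and initialise it
 * for the given request header. The entry defaults to synchronous use:
 * its callback simply wakes the allocating task.
 */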
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* when mid allocated can be before when sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
                                               refcount);

        mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

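/*
 * Release the response buffer attached to a mid, update the optional
 * CONFIG_CIFS_STATS2 per-command timing counters, and drop a reference
 * on the entry via cifs_mid_q_entry_release().
 */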
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
        struct TCP_Server_Info *server = midEntry->server;
#endif
        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_dbg(VFS, "invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * commands taking longer than one second (default) can be indications
         * that something is wrong, unless it is quite a slow link or a very
         * busy server. Note that this calc is unlikely or impossible to wrap
         * as long as slow_rsp_threshold is not set way above recommended max
         * value (32767 ie 9 hours) and is generally harmless even if wrong
         * since only affects debug counters - so leaving the calc as simple
         * comparison rather than doing multiple conversions and overflow
         * checks
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so can not be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                                    midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug(" CIFS slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        cifs_mid_q_entry_release(midEntry);
}

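/*
 * Unlink a mid from the pending queue under GlobalMid_Lock, mark it
 * deleted, then free it with DeleteMidQEntry().
 */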
void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        list_del_init(&mid->qhead);
        mid->mid_flags |= MID_DELETED;
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @smb_msg: Message to send
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more
                 * but wait increasing amounts of time allowing time for
                 * socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet. In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /* should never happen, letting socket clear before
                           retrying is our only obvious option here */
                        cifs_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

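/*
 * Return the total number of bytes a request will occupy on the wire:
 * the iov array (skipping the RFC1002 length vector for SMB2+) plus any
 * attached page array.
 */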
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

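/*
 * Send one or more smb_rqst structures over the server socket (or via
 * smbdirect when RDMA is in use). Signals are blocked for the duration
 * of the send so that a partial send does not force a reconnect.
 */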
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server) && server->smbd_conn) {
                rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (signal_pending(current)) {
                cifs_dbg(FYI, "signal is pending before sending any data\n");
                return -EINTR;
        }

        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                          (char *)&val, sizeof(val));

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If signal is pending but we have already sent the whole packet to
         * the server we need to return success status to allow a corresponding
         * mid entry to be kept in the pending requests queue thus allowing
         * to handle responses from the server by the client.
         *
         * If only part of the packet has been sent there is no need to hide
         * interrupt because the session will be reconnected anyway, so there
         * won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -EINTR;
        }

        /* uncork it */
        val = 0;
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                          (char *)&val, sizeof(val));

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

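/*
 * Wrapper around __smb_send_rqst() that, when CIFS_TRANSFORM_REQ is set,
 * first asks the dialect-specific init_transform_rq callback to encrypt
 * the compound before it is sent.
 */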
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(&tr_hdr, 0, sizeof(tr_hdr));

        iov.iov_base = &tr_hdr;
        iov.iov_len = sizeof(tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        if (!server->ops->init_transform_rq) {
                cifs_dbg(VFS, "Encryption requested but transform callback "
                         "is missing\n");
                return -EIO;
        }

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                return rc;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
        return rc;
}

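/* Send a single legacy SMB buffer, split into the RFC1002 length and body. */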
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

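/*
 * Wait until @num_credits credits are available on the server (or until
 * @timeout milliseconds elapse, a negative timeout meaning wait forever),
 * then consume them and record the reconnect instance they were taken from.
 */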
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        int rc;
        int *credits;
        int optype;
        long int t;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                trace_smb3_credit_timeout(server->CurrentMid,
                                        server->hostname, num_credits);
                                cifs_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits to compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        trace_smb3_credit_timeout(
                                                server->CurrentMid,
                                                server->hostname, num_credits);
                                        cifs_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

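/*
 * Like wait_for_free_request() but for a whole compound chain: fail fast
 * if the server is unlikely to ever grant enough credits, otherwise wait
 * up to 60 seconds for them.
 */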
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        if (*credits < num) {
                /*
                 * Return immediately if not too many requests in flight since
                 * we will likely be stuck on waiting for credits.
                 */
                if (server->in_flight < num - *credits) {
                        spin_unlock(&server->req_lock);
                        return -ENOTSUPP;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

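/*
 * Allocate a mid for a legacy SMB request and queue it on the server's
 * pending_mid_q, refusing to do so while the session or TCP connection
 * is in a state that cannot accept new requests.
 */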
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                    (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                        midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

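/*
 * Build and sign a mid for an asynchronous request. The first iovec must
 * be the 4-byte RFC1002 length immediately followed by the SMB header.
 */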
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 *
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

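/*
 * Translate the final state of a mid into an errno for the synchronous
 * send/receive paths and free the entry (except when a response was
 * received, in which case the caller still owns the buffer).
 */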
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                list_del_init(&mid->qhead);
                cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

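/*
 * Verify the signature of a received response (when signing is active)
 * and map the SMB status code to a POSIX error.
 */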
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

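/*
 * Build and sign a mid for a synchronous request and queue it on the
 * pending_mid_q; same iovec layout rules as cifs_setup_async_request().
 */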
struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

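/*
 * Per-mid completion callbacks used by compound_send_recv(): every part
 * of a chain returns its granted credits to the server; only the last
 * part wakes the waiting thread.
 */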
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

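/*
 * Send a compound chain of requests and wait for the responses. Credits
 * are reserved up front, the whole chain is signed and sent under
 * srv_mutex, and each response is validated before being handed back to
 * the caller in resp_iov.
 */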
Pavel Shilovskyb8f57ee2016-11-23 15:31:54 -0800969int
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +1000970compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
971 const int flags, const int num_rqst, struct smb_rqst *rqst,
972 int *resp_buf_type, struct kvec *resp_iov)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973{
Ronnie Sahlberg480b1cb2019-03-08 12:58:18 +1000974 int i, j, optype, rc = 0;
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +1000975 struct mid_q_entry *midQ[MAX_COMPOUND];
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -0800976 bool cancelled_mid[MAX_COMPOUND] = {false};
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -0800977 struct cifs_credits credits[MAX_COMPOUND] = {
978 { .value = 0, .instance = 0 }
979 };
980 unsigned int instance;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -0800981 char *buf;
Aurelien Aptel3190b592019-06-24 13:00:12 -0500982 struct TCP_Server_Info *server;
Steve French50c2f752007-07-13 00:33:32 +0000983
Pavel Shilovskya891f0f2012-05-23 16:14:34 +0400984 optype = flags & CIFS_OP_MASK;
Steve French133672e2007-11-13 22:41:37 +0000985
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +1000986 for (i = 0; i < num_rqst; i++)
987 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988
Steve French4b8f9302006-02-26 16:41:18 +0000989 if ((ses == NULL) || (ses->server == NULL)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500990 cifs_dbg(VFS, "Null session\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991 return -EIO;
992 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993
Aurelien Aptel3190b592019-06-24 13:00:12 -0500994 server = ses->server;
995 if (server->tcpStatus == CifsExiting)
Steve French31ca3bc2005-04-28 22:41:11 -0700996 return -ENOENT;
997
Pavel Shilovsky792af7b2012-03-23 14:28:02 -0400998 /*
Ronnie Sahlberg257b7802019-03-11 12:18:58 +1000999 * Wait for all the requests to become available.
Pavel Shilovsky7091bca2019-01-30 16:58:09 -08001000 * This approach still leaves the possibility to be stuck waiting for
1001 * credits if the server doesn't grant credits to the outstanding
Ronnie Sahlberg257b7802019-03-11 12:18:58 +10001002 * requests and if the client is completely idle, not generating any
1003 * other requests.
1004 * This can be handled by the eventual session reconnect.
Pavel Shilovsky792af7b2012-03-23 14:28:02 -04001005 */
Aurelien Aptel3190b592019-06-24 13:00:12 -05001006 rc = wait_for_compound_request(server, num_rqst, flags,
Ronnie Sahlberg257b7802019-03-11 12:18:58 +10001007 &instance);
1008 if (rc)
1009 return rc;
1010
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001011 for (i = 0; i < num_rqst; i++) {
Ronnie Sahlberg257b7802019-03-11 12:18:58 +10001012 credits[i].value = 1;
1013 credits[i].instance = instance;
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001014 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001015
Pavel Shilovsky792af7b2012-03-23 14:28:02 -04001016 /*
1017 * Make sure that we sign in the same order that we send on this socket
1018 * and avoid races inside tcp sendmsg code that could cause corruption
1019 * of smb data.
1020 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021
Aurelien Aptel3190b592019-06-24 13:00:12 -05001022 mutex_lock(&server->srv_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023
Pavel Shilovsky97ea4992019-01-15 16:07:52 -08001024 /*
1025 * All the parts of the compound chain belong obtained credits from the
Ronnie Sahlberg257b7802019-03-11 12:18:58 +10001026 * same session. We can not use credits obtained from the previous
Pavel Shilovsky97ea4992019-01-15 16:07:52 -08001027 * session to send this request. Check if there were reconnects after
1028 * we obtained credits and return -EAGAIN in such cases to let callers
1029 * handle it.
1030 */
Aurelien Aptel3190b592019-06-24 13:00:12 -05001031 if (instance != server->reconnect_instance) {
1032 mutex_unlock(&server->srv_mutex);
Pavel Shilovsky97ea4992019-01-15 16:07:52 -08001033 for (j = 0; j < num_rqst; j++)
Aurelien Aptel3190b592019-06-24 13:00:12 -05001034 add_credits(server, &credits[j], optype);
Pavel Shilovsky97ea4992019-01-15 16:07:52 -08001035 return -EAGAIN;
1036 }
1037
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001038 for (i = 0; i < num_rqst; i++) {
Aurelien Aptel3190b592019-06-24 13:00:12 -05001039 midQ[i] = server->ops->setup_request(ses, &rqst[i]);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001040 if (IS_ERR(midQ[i])) {
Aurelien Aptel3190b592019-06-24 13:00:12 -05001041 revert_current_mid(server, i);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001042 for (j = 0; j < i; j++)
1043 cifs_delete_mid(midQ[j]);
Aurelien Aptel3190b592019-06-24 13:00:12 -05001044 mutex_unlock(&server->srv_mutex);
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001045
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001046 /* Update # of requests on wire to server */
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001047 for (j = 0; j < num_rqst; j++)
Aurelien Aptel3190b592019-06-24 13:00:12 -05001048 add_credits(server, &credits[j], optype);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001049 return PTR_ERR(midQ[i]);
1050 }
1051
1052 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
Pavel Shilovsky8a26f0f2019-01-03 16:45:27 -08001053 midQ[i]->optype = optype;
Ronnie Sahlberg4e34feb2018-08-30 10:13:00 +10001054 /*
Pavel Shilovskyee258d72019-01-03 15:53:10 -08001055 * Invoke callback for every part of the compound chain
1056 * to calculate credits properly. Wake up this thread only when
1057 * the last element is received.
Ronnie Sahlberg4e34feb2018-08-30 10:13:00 +10001058 */
1059 if (i < num_rqst - 1)
Pavel Shilovskyee258d72019-01-03 15:53:10 -08001060 midQ[i]->callback = cifs_compound_callback;
1061 else
1062 midQ[i]->callback = cifs_compound_last_callback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063 }
Aurelien Aptel3190b592019-06-24 13:00:12 -05001064 cifs_in_send_inc(server);
1065 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1066 cifs_in_send_dec(server);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001067
1068 for (i = 0; i < num_rqst; i++)
1069 cifs_save_when_sent(midQ[i]);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001070
Pavel Shilovskyc781af72019-03-04 14:02:50 -08001071 if (rc < 0) {
Aurelien Aptel3190b592019-06-24 13:00:12 -05001072 revert_current_mid(server, num_rqst);
1073 server->sequence_number -= 2;
Pavel Shilovskyc781af72019-03-04 14:02:50 -08001074 }
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001075
Aurelien Aptel3190b592019-06-24 13:00:12 -05001076 mutex_unlock(&server->srv_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001077
Ronnie Sahlbergd69cb722019-05-01 12:03:41 +10001078 /*
1079 * If sending failed for some reason or it is an oplock break that we
1080 * will not receive a response to - return credits back
1081 */
1082 if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
Pavel Shilovskyee258d72019-01-03 15:53:10 -08001083 for (i = 0; i < num_rqst; i++)
Aurelien Aptel3190b592019-06-24 13:00:12 -05001084 add_credits(server, &credits[i], optype);
Ronnie Sahlbergcb5c2e62018-10-10 15:29:06 +10001085 goto out;
Pavel Shilovskyee258d72019-01-03 15:53:10 -08001086 }
1087
1088 /*
1089 * At this point the request is passed to the network stack - we assume
1090 * that any credits taken from the server structure on the client have
1091 * been spent and we can't return them back. Once we receive responses
1092 * we will collect credits granted by the server in the mid callbacks
1093 * and add those credits to the server structure.
1094 */
Ronnie Sahlbergcb5c2e62018-10-10 15:29:06 +10001095
1096 /*
1097 * Compounding is never used during session establish.
1098 */
1099 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
1100 smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1101 rqst[0].rq_nvec);
1102
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001103 for (i = 0; i < num_rqst; i++) {
Aurelien Aptel3190b592019-06-24 13:00:12 -05001104 rc = wait_for_response(server, midQ[i]);
Pavel Shilovsky8a26f0f2019-01-03 16:45:27 -08001105 if (rc != 0)
1106 break;
1107 }
1108 if (rc != 0) {
1109 for (; i < num_rqst; i++) {
Steve French43de1db2018-10-23 21:04:57 -05001110 cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
1111 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
Aurelien Aptel3190b592019-06-24 13:00:12 -05001112 send_cancel(server, &rqst[i], midQ[i]);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001113 spin_lock(&GlobalMid_Lock);
1114 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1115 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
Pavel Shilovsky8a26f0f2019-01-03 16:45:27 -08001116 midQ[i]->callback = cifs_cancelled_callback;
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001117 cancelled_mid[i] = true;
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08001118 credits[i].value = 0;
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001119 }
Jeff Layton1be912d2011-01-28 07:08:28 -05001120 spin_unlock(&GlobalMid_Lock);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001121 }
Ronnie Sahlbergcb5c2e62018-10-10 15:29:06 +10001122 }
1123
Ronnie Sahlbergcb5c2e62018-10-10 15:29:06 +10001124 for (i = 0; i < num_rqst; i++) {
1125 if (rc < 0)
1126 goto out;
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001127
Aurelien Aptel3190b592019-06-24 13:00:12 -05001128 rc = cifs_sync_mid_result(midQ[i], server);
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001129 if (rc != 0) {
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001130 /* mark this mid as cancelled to not free it below */
1131 cancelled_mid[i] = true;
1132 goto out;
Jeff Layton1be912d2011-01-28 07:08:28 -05001133 }
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001134
1135 if (!midQ[i]->resp_buf ||
1136 midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1137 rc = -EIO;
1138 cifs_dbg(FYI, "Bad MID state?\n");
1139 goto out;
1140 }
1141
1142 buf = (char *)midQ[i]->resp_buf;
1143 resp_iov[i].iov_base = buf;
1144 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
Aurelien Aptel3190b592019-06-24 13:00:12 -05001145 server->vals->header_preamble_size;
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001146
1147 if (midQ[i]->large_buf)
1148 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1149 else
1150 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1151
Aurelien Aptel3190b592019-06-24 13:00:12 -05001152 rc = server->ops->check_receive(midQ[i], server,
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001153 flags & CIFS_LOG_ERROR);
1154
1155 /* mark it so buf will not be freed by cifs_delete_mid */
Ronnie Sahlberg392e1c52019-05-06 10:00:02 +10001156 if ((flags & CIFS_NO_RSP_BUF) == 0)
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001157 midQ[i]->resp_buf = NULL;
Ronnie Sahlbergcb5c2e62018-10-10 15:29:06 +10001158
Jeff Layton1be912d2011-01-28 07:08:28 -05001159 }
Ronnie Sahlbergcb5c2e62018-10-10 15:29:06 +10001160
1161 /*
1162 * Compounding is never used during session establish.
1163 */
1164 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1165 struct kvec iov = {
1166 .iov_base = resp_iov[0].iov_base,
1167 .iov_len = resp_iov[0].iov_len
1168 };
1169 smb311_update_preauth_hash(ses, &iov, 1);
1170 }
1171
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001172out:
Ronnie Sahlberg4e34feb2018-08-30 10:13:00 +10001173 /*
1174 * This will dequeue all mids. After this it is important that the
1175 * demultiplex_thread will not process any of these mids any further.
1176 * This is prevented above by using a noop callback that will not
1177 * wake this thread except for the very last PDU.
1178 */
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001179 for (i = 0; i < num_rqst; i++) {
1180 if (!cancelled_mid[i])
1181 cifs_delete_mid(midQ[i]);
Pavel Shilovsky8544f4a2018-12-22 12:40:05 -08001182 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183
1184 return rc;
1185}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186
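/*
 * cifs_send_recv() is the single-request wrapper around compound_send_recv():
 * it submits one smb_rqst and returns that request's response iovec.
 *
 * Illustrative call pattern (a sketch only - xid, ses, rqst and flags are the
 * caller's own):
 *
 *	struct kvec rsp_iov;
 *	int buf_type = CIFS_NO_BUFFER;
 *
 *	rc = cifs_send_recv(xid, ses, &rqst, &buf_type, flags, &rsp_iov);
 *	if (rc == 0)
 *		free_rsp_buf(buf_type, rsp_iov.iov_base);
 */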
1187int
Ronnie Sahlberge0bba0b82018-08-01 09:26:13 +10001188cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1189 struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1190 struct kvec *resp_iov)
1191{
1192 return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
1193 resp_iov);
1194}
1195
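/*
 * SendReceive2() is a legacy iovec-based wrapper: it prepends a kvec for the
 * 4-byte RFC1001 length field to the caller's vectors, wraps them in an
 * smb_rqst and hands that to cifs_send_recv(). After the copy below the
 * layout is:
 *
 *	new_iov[0]: 4-byte RFC1001 length (start of the caller's first buffer)
 *	new_iov[1]: remainder of the caller's first buffer
 *	new_iov[2..n]: the caller's remaining vectors, unchanged
 */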
1196int
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08001197SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1198 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1199 const int flags, struct kvec *resp_iov)
1200{
1201 struct smb_rqst rqst;
Ronnie Sahlberg3cecf482017-11-21 15:08:07 +11001202 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08001203 int rc;
1204
Ronnie Sahlberg3cecf482017-11-21 15:08:07 +11001205 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
Kees Cook6da2ec52018-06-12 13:55:00 -07001206 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1207 GFP_KERNEL);
Steve French117e3b72018-04-22 10:24:19 -05001208 if (!new_iov) {
1209 /* otherwise cifs_send_recv below sets resp_buf_type */
1210 *resp_buf_type = CIFS_NO_BUFFER;
Ronnie Sahlberg3cecf482017-11-21 15:08:07 +11001211 return -ENOMEM;
Steve French117e3b72018-04-22 10:24:19 -05001212 }
Ronnie Sahlberg3cecf482017-11-21 15:08:07 +11001213 } else
1214 new_iov = s_iov;
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08001215
1216 /* 1st iov is an RFC1001 length followed by the rest of the packet */
1217 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1218
1219 new_iov[0].iov_base = new_iov[1].iov_base;
1220 new_iov[0].iov_len = 4;
1221 new_iov[1].iov_base += 4;
1222 new_iov[1].iov_len -= 4;
1223
1224 memset(&rqst, 0, sizeof(struct smb_rqst));
1225 rqst.rq_iov = new_iov;
1226 rqst.rq_nvec = n_vec + 1;
1227
1228 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
Ronnie Sahlberg3cecf482017-11-21 15:08:07 +11001229 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1230 kfree(new_iov);
Pavel Shilovsky738f9de2016-11-23 15:14:57 -08001231 return rc;
1232}
1233
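/*
 * SendReceive() is the synchronous SMB1 path: send a single pre-built
 * smb_hdr (in_buf), wait for the reply and copy it into out_buf, returning
 * the length of the response in *pbytes_returned.
 */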
1234int
Steve French96daf2b2011-05-27 04:34:02 +00001235SendReceive(const unsigned int xid, struct cifs_ses *ses,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
Ronnie Sahlberg480b1cb2019-03-08 12:58:18 +10001237 int *pbytes_returned, const int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238{
1239 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240 struct mid_q_entry *midQ;
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001241 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1242 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1243 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08001244 struct cifs_credits credits = { .value = 1, .instance = 0 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245
1246 if (ses == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001247 cifs_dbg(VFS, "Null smb session\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 return -EIO;
1249 }
Steve French79a58d12007-07-06 22:44:50 +00001250 if (ses->server == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001251 cifs_dbg(VFS, "Null tcp session\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 return -EIO;
1253 }
1254
Steve French79a58d12007-07-06 22:44:50 +00001255 if (ses->server->tcpStatus == CifsExiting)
Steve French31ca3bc2005-04-28 22:41:11 -07001256 return -ENOENT;
1257
Steve French79a58d12007-07-06 22:44:50 +00001258 /* Ensure that we do not send more than 50 overlapping requests
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 to the same server. We may make this configurable later or
1260 use ses->maxReq */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001262 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001263 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001264 len);
Volker Lendecke6d9c6d52008-12-08 20:50:24 +00001265 return -EIO;
1266 }
1267
Ronnie Sahlberg480b1cb2019-03-08 12:58:18 +10001268 rc = wait_for_free_request(ses->server, flags, &credits.instance);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001269 if (rc)
1270 return rc;
1271
Steve French79a58d12007-07-06 22:44:50 +00001272 /* make sure that we sign in the same order that we send on this socket
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273 and avoid races inside tcp sendmsg code that could cause corruption
1274 of smb data */
1275
Jeff Layton72ca5452008-12-01 07:09:36 -05001276 mutex_lock(&ses->server->srv_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001278 rc = allocate_mid(ses, in_buf, &midQ);
1279 if (rc) {
Jeff Layton72ca5452008-12-01 07:09:36 -05001280 mutex_unlock(&ses->server->srv_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001281 /* Update # of requests on wire to server */
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08001282 add_credits(ses->server, &credits, 0);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001283 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 }
1285
Steve Frenchad009ac2005-04-28 22:41:05 -07001286 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
Volker Lendecke829049c2008-12-06 16:00:53 +01001287 if (rc) {
1288 mutex_unlock(&ses->server->srv_mutex);
1289 goto out;
1290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291
Pavel Shilovsky7c9421e2012-03-23 14:28:03 -04001292 midQ->mid_state = MID_REQUEST_SUBMITTED;
Steve French789e6662011-08-09 18:44:44 +00001293
1294 cifs_in_send_inc(ses->server);
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001295 rc = smb_send(ses->server, in_buf, len);
Steve French789e6662011-08-09 18:44:44 +00001296 cifs_in_send_dec(ses->server);
1297 cifs_save_when_sent(midQ);
Jeff Laytonad313cb2013-04-03 10:27:36 -04001298
1299 if (rc < 0)
1300 ses->server->sequence_number -= 2;
1301
Jeff Layton72ca5452008-12-01 07:09:36 -05001302 mutex_unlock(&ses->server->srv_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001303
Steve French79a58d12007-07-06 22:44:50 +00001304 if (rc < 0)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001305 goto out;
1306
Jeff Layton0ade6402011-01-11 07:24:02 -05001307 rc = wait_for_response(ses->server, midQ);
Jeff Layton1be912d2011-01-28 07:08:28 -05001308 if (rc != 0) {
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001309 send_cancel(ses->server, &rqst, midQ);
Jeff Layton1be912d2011-01-28 07:08:28 -05001310 spin_lock(&GlobalMid_Lock);
Pavel Shilovsky7c9421e2012-03-23 14:28:03 -04001311 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
Jeff Layton1be912d2011-01-28 07:08:28 -05001312 /* no longer considered to be "in-flight" */
1313 midQ->callback = DeleteMidQEntry;
1314 spin_unlock(&GlobalMid_Lock);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08001315 add_credits(ses->server, &credits, 0);
Jeff Layton1be912d2011-01-28 07:08:28 -05001316 return rc;
1317 }
1318 spin_unlock(&GlobalMid_Lock);
1319 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
Jeff Layton3c1105d2011-05-22 07:09:13 -04001321 rc = cifs_sync_mid_result(midQ, ses->server);
Jeff Layton053d5032011-01-11 07:24:02 -05001322 if (rc != 0) {
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08001323 add_credits(ses->server, &credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 return rc;
1325 }
Steve French50c2f752007-07-13 00:33:32 +00001326
Jeff Layton2c8f9812011-05-19 16:22:52 -04001327 if (!midQ->resp_buf || !out_buf ||
Pavel Shilovsky7c9421e2012-03-23 14:28:03 -04001328 midQ->mid_state != MID_RESPONSE_RECEIVED) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 rc = -EIO;
Joe Perchesf96637b2013-05-04 22:12:25 -05001330 cifs_dbg(VFS, "Bad MID state?\n");
Steve French2b2bdfb2008-12-11 17:26:54 +00001331 goto out;
1332 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333
Pavel Shilovskyd4e48542012-03-23 14:28:02 -04001334 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
Jeff Layton2c8f9812011-05-19 16:22:52 -04001335 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1336 rc = cifs_check_receive(midQ, ses->server, 0);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001337out:
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001338 cifs_delete_mid(midQ);
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08001339 add_credits(ses->server, &credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340
1341 return rc;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001342}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001344/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1345 blocking lock to return. */
1346
1347static int
Steve French96daf2b2011-05-27 04:34:02 +00001348send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001349 struct smb_hdr *in_buf,
1350 struct smb_hdr *out_buf)
1351{
1352 int bytes_returned;
Steve French96daf2b2011-05-27 04:34:02 +00001353 struct cifs_ses *ses = tcon->ses;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001354 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1355
1356 /* We just modify the current in_buf to change
1357 the type of lock from LOCKING_ANDX_SHARED_LOCK
1358 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1359 LOCKING_ANDX_CANCEL_LOCK. */
1360
1361 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1362 pSMB->Timeout = 0;
Pavel Shilovsky88257362012-05-23 14:01:59 +04001363 pSMB->hdr.Mid = get_next_mid(ses->server);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001364
1365 return SendReceive(xid, ses, in_buf, out_buf,
Jeff Layton77499812011-01-11 07:24:23 -05001366 &bytes_returned, 0);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001367}
1368
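/*
 * SendReceiveBlockingLock() is like SendReceive() but for SMB1 byte-range
 * lock requests that may block on the server. The wait is interruptible: if
 * a signal arrives while the request is still outstanding, a cancel is sent
 * (an NT_CANCEL for POSIX/Transaction2 locks, LOCKINGX_CANCEL_LOCK
 * otherwise) and we then wait for the original request to complete so that
 * the mid can be freed safely.
 */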
1369int
Steve French96daf2b2011-05-27 04:34:02 +00001370SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001371 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1372 int *pbytes_returned)
1373{
1374 int rc = 0;
1375 int rstart = 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001376 struct mid_q_entry *midQ;
Steve French96daf2b2011-05-27 04:34:02 +00001377 struct cifs_ses *ses;
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001378 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1379 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1380 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
Pavel Shilovsky34f4deb2019-01-16 11:22:29 -08001381 unsigned int instance;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001382
1383 if (tcon == NULL || tcon->ses == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001384 cifs_dbg(VFS, "Null smb session\n");
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001385 return -EIO;
1386 }
1387 ses = tcon->ses;
1388
Steve French79a58d12007-07-06 22:44:50 +00001389 if (ses->server == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001390 cifs_dbg(VFS, "Null tcp session\n");
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001391 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 }
1393
Steve French79a58d12007-07-06 22:44:50 +00001394 if (ses->server->tcpStatus == CifsExiting)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001395 return -ENOENT;
1396
Steve French79a58d12007-07-06 22:44:50 +00001397 /* Ensure that we do not send more than 50 overlapping requests
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001398 to the same server. We may make this configurable later or
1399 use ses->maxReq */
1400
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001401 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001402 cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001403 len);
Volker Lendecke6d9c6d52008-12-08 20:50:24 +00001404 return -EIO;
1405 }
1406
Ronnie Sahlberg480b1cb2019-03-08 12:58:18 +10001407 rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, &instance);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001408 if (rc)
1409 return rc;
1410
Steve French79a58d12007-07-06 22:44:50 +00001411 /* make sure that we sign in the same order that we send on this socket
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001412 and avoid races inside tcp sendmsg code that could cause corruption
1413 of smb data */
1414
Jeff Layton72ca5452008-12-01 07:09:36 -05001415 mutex_lock(&ses->server->srv_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001416
1417 rc = allocate_mid(ses, in_buf, &midQ);
1418 if (rc) {
Jeff Layton72ca5452008-12-01 07:09:36 -05001419 mutex_unlock(&ses->server->srv_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001420 return rc;
1421 }
1422
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001423 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
Volker Lendecke829049c2008-12-06 16:00:53 +01001424 if (rc) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001425 cifs_delete_mid(midQ);
Volker Lendecke829049c2008-12-06 16:00:53 +01001426 mutex_unlock(&ses->server->srv_mutex);
1427 return rc;
1428 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001429
Pavel Shilovsky7c9421e2012-03-23 14:28:03 -04001430 midQ->mid_state = MID_REQUEST_SUBMITTED;
Steve French789e6662011-08-09 18:44:44 +00001431 cifs_in_send_inc(ses->server);
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001432 rc = smb_send(ses->server, in_buf, len);
Steve French789e6662011-08-09 18:44:44 +00001433 cifs_in_send_dec(ses->server);
1434 cifs_save_when_sent(midQ);
Jeff Laytonad313cb2013-04-03 10:27:36 -04001435
1436 if (rc < 0)
1437 ses->server->sequence_number -= 2;
1438
Jeff Layton72ca5452008-12-01 07:09:36 -05001439 mutex_unlock(&ses->server->srv_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001440
Steve French79a58d12007-07-06 22:44:50 +00001441 if (rc < 0) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001442 cifs_delete_mid(midQ);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001443 return rc;
1444 }
1445
1446 /* Wait for a reply - allow signals to interrupt. */
1447 rc = wait_event_interruptible(ses->server->response_q,
Pavel Shilovsky7c9421e2012-03-23 14:28:03 -04001448 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001449 ((ses->server->tcpStatus != CifsGood) &&
1450 (ses->server->tcpStatus != CifsNew)));
1451
1452 /* Were we interrupted by a signal ? */
1453 if ((rc == -ERESTARTSYS) &&
Pavel Shilovsky7c9421e2012-03-23 14:28:03 -04001454 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001455 ((ses->server->tcpStatus == CifsGood) ||
1456 (ses->server->tcpStatus == CifsNew))) {
1457
1458 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1459 /* POSIX lock. We send an NT_CANCEL SMB to cause the
1460 blocking lock to return. */
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001461 rc = send_cancel(ses->server, &rqst, midQ);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001462 if (rc) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001463 cifs_delete_mid(midQ);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001464 return rc;
1465 }
1466 } else {
1467 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1468 to cause the blocking lock to return. */
1469
1470 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1471
1472 /* If we get -ENOLCK back the lock may have
1473 already been removed. Don't exit in this case. */
1474 if (rc && rc != -ENOLCK) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001475 cifs_delete_mid(midQ);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001476 return rc;
1477 }
1478 }
1479
Jeff Layton1be912d2011-01-28 07:08:28 -05001480 rc = wait_for_response(ses->server, midQ);
1481 if (rc) {
Pavel Shilovskyfb2036d2016-11-23 15:08:14 -08001482 send_cancel(ses->server, &rqst, midQ);
Jeff Layton1be912d2011-01-28 07:08:28 -05001483 spin_lock(&GlobalMid_Lock);
Pavel Shilovsky7c9421e2012-03-23 14:28:03 -04001484 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
Jeff Layton1be912d2011-01-28 07:08:28 -05001485 /* no longer considered to be "in-flight" */
1486 midQ->callback = DeleteMidQEntry;
1487 spin_unlock(&GlobalMid_Lock);
1488 return rc;
1489 }
1490 spin_unlock(&GlobalMid_Lock);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001491 }
Jeff Layton1be912d2011-01-28 07:08:28 -05001492
1493 /* We got the response - restart system call. */
1494 rstart = 1;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001495 }
1496
Jeff Layton3c1105d2011-05-22 07:09:13 -04001497 rc = cifs_sync_mid_result(midQ, ses->server);
Jeff Layton053d5032011-01-11 07:24:02 -05001498 if (rc != 0)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001499 return rc;
Steve French50c2f752007-07-13 00:33:32 +00001500
Volker Lendecke17c8bfe2008-12-06 16:38:19 +01001501 /* rcvd frame is ok */
Pavel Shilovsky7c9421e2012-03-23 14:28:03 -04001502 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
Volker Lendecke17c8bfe2008-12-06 16:38:19 +01001503 rc = -EIO;
Joe Perchesf96637b2013-05-04 22:12:25 -05001504 cifs_dbg(VFS, "Bad MID state?\n");
Volker Lendecke698e96a2008-12-06 16:39:31 +01001505 goto out;
Volker Lendecke17c8bfe2008-12-06 16:38:19 +01001506 }
1507
Pavel Shilovskyd4e48542012-03-23 14:28:02 -04001508 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
Jeff Layton2c8f9812011-05-19 16:22:52 -04001509 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1510 rc = cifs_check_receive(midQ, ses->server, 0);
Volker Lendecke17c8bfe2008-12-06 16:38:19 +01001511out:
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001512 cifs_delete_mid(midQ);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001513 if (rstart && rc == -EACCES)
1514 return -ERESTARTSYS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 return rc;
1516}