/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"

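/*
 * Map the O_ACCMODE bits of the open(2) flags to the NT/SMB generic access
 * rights requested on the wire. Read/write opens deliberately request
 * GENERIC_READ | GENERIC_WRITE rather than GENERIC_ALL (see comment below).
 */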
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request and can
		 * cause an unnecessary access-denied error on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

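/*
 * Translate open(2) flags into the SMB_O_* flags used by the POSIX open
 * protocol extension (CIFSPOSIXCreate). Unlike cifs_convert_flags(), this
 * covers creation and I/O hint flags as well as the access mode.
 */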
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

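/*
 * Derive the CIFS create disposition from the open(2) creation flags.
 * See the open flag mapping table in cifs_nt_open() below.
 */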
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

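/*
 * Open a file using the POSIX protocol extensions. On success, *poplock and
 * *pnetfid describe the granted oplock and file handle; if @pinode is
 * non-NULL, it is filled in with a new or updated inode based on the
 * FILE_UNIX_BASIC_INFO data returned by the server.
 */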
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open
 *	call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably. O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client. The flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}

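/*
 * Return true if any open instance of this inode holds at least one cached
 * byte-range (mandatory style) lock. Callers use this to decide whether a
 * read oplock can safely be kept, since mandatory locks and client-side
 * caching of read data do not mix.
 */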
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

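/*
 * Acquire @sem for writing by polling with down_write_trylock() instead of
 * blocking in down_write(), presumably to avoid a lock_sem deadlock between
 * readers and a blocked writer.
 */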
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);

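/*
 * Allocate and initialize the per-open-instance bookkeeping (cifsFileInfo),
 * link it into the tcon and inode open-file lists, and apply the oplock the
 * server granted (downgrading it if mandatory brlocks are already cached).
 */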
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance, put it first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

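/*
 * Final teardown once the last reference to a cifsFileInfo is gone: drop any
 * remaining cached lock records, then release the tlink, dentry and
 * superblock references taken at open time. Runs either inline or from the
 * fileinfo_put_wq work queue (see cifsFileInfo_put_work() below).
 */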
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference to file private data
 *
 * Always potentially waits for the oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference to file private data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close_getattr)
			server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	if (offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

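/*
 * ->open() for regular files: try a POSIX-extensions open where the server
 * supports it, otherwise fall back to a regular NT-style open, then attach
 * a new cifsFileInfo to the struct file. A pending open is registered first
 * so that a lease break arriving mid-open is not missed.
 */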
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				    le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			   (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fall through to retry the open the old way on network
		 * i/o or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte-range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

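/*
 * Reopen a file handle that went stale, typically after reconnecting to the
 * server. If @can_flush is true, write-behind data is flushed and the inode
 * metadata refreshed before the cached byte-range locks are re-sent.
 */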
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * We can not grab the rename sem here, because various ops, including
	 * those that already hold it, can end up causing writepage to get
	 * called. If the server was down, that means we end up here, and we
	 * can never tell if the caller already holds the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * retrying hard is especially important in the reconnect
		 * path.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * We can not refresh the inode by passing in a file_info buf to be
	 * returned by ops->open and then calling get_inode_info with the
	 * returned buf, since the file might have write-behind data that
	 * needs to be flushed and the server's version of the file size can
	 * be stale. If we knew for sure that the inode was not dirty locally
	 * we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to the server already and could
	 * deadlock if we tried to flush data; and since we do not know if we
	 * have data that would invalidate the current end of file on the
	 * server, we can not go to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		_cifsFileInfo_put(file->private_data, true, false);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

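/*
 * Walk every open file on this tree connection and reopen any handle that
 * was invalidated, so persistent handles survive a reconnect. References
 * are taken under open_file_lock and the actual reopens happen outside it.
 */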
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen persistent handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

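/*
 * Allocate a cifsLockInfo record for a byte-range lock owned by the current
 * task. Waiters queue behind a conflicting lock via the blist/block_q
 * fields (see cifs_lock_add_if() below).
 */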
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		      current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->fl_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001124/*
1125 * Check if there is another lock that prevents us from setting the lock
1126 * (POSIX style). If such a lock exists, update the flock structure with its
1127 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
1128 * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
1129 * send a request to the server, or 1 otherwise.
1130 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001131static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001132cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1133{
1134 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001135 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001136 unsigned char saved_type = flock->fl_type;
1137
Pavel Shilovsky50792762011-10-29 17:17:57 +04001138 if ((flock->fl_flags & FL_POSIX) == 0)
1139 return 1;
1140
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001141 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001142 posix_test_lock(file, flock);
1143
1144 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1145 flock->fl_type = saved_type;
1146 rc = 1;
1147 }
1148
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001149 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001150 return rc;
1151}
1152
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001153/*
1154 * Set the byte-range lock (POSIX style). Returns:
1155 * 1) 0, if we set the lock and don't need to send a request to the server;
1156 * 2) 1, if we need to send a request to the server;
1157 * 3) <0, if an error occurs while setting the lock.
1158 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001159static int
1160cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1161{
Al Viro496ad9a2013-01-23 17:07:38 -05001162 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001163 int rc = 1;
1164
1165 if ((flock->fl_flags & FL_POSIX) == 0)
1166 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001167
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001168try_again:
Dave Wysochanskid46b0da2019-10-23 05:02:33 -04001169 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001170 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001171 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001172 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001173 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001174
1175 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001176 up_write(&cinode->lock_sem);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001177 if (rc == FILE_LOCK_DEFERRED) {
NeilBrownada5c1d2018-11-30 10:04:08 +11001178 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001179 if (!rc)
1180 goto try_again;
NeilBrowncb03f942018-11-30 10:04:08 +11001181 locks_delete_block(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001182 }
Steve French9ebb3892012-04-01 13:52:54 -05001183 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001184}
1185
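/*
 * Push all cached mandatory byte-range locks for this open file to the
 * server, batching as many LOCKING_ANDX_RANGE entries per request as
 * maxBuf allows; exclusive and shared locks are sent in separate passes.
 */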
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001186int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001187cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001188{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001189 unsigned int xid;
1190 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001191 struct cifsLockInfo *li, *tmp;
1192 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001193 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001194 LOCKING_ANDX_RANGE *buf, *cur;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001195 static const int types[] = {
1196 LOCKING_ANDX_LARGE_FILES,
1197 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1198 };
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001199 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001200
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001201 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001202 tcon = tlink_tcon(cfile->tlink);
1203
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001204 /*
1205 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001206 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001207 */
1208 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001209 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001210 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001211 return -EINVAL;
1212 }
1213
Ross Lagerwall92a81092019-01-08 18:30:56 +00001214 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1215 PAGE_SIZE);
1216 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1217 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001218 max_num = (max_buf - sizeof(struct smb_hdr)) /
1219 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001220 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001221 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001222 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001223 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001224 }
1225
1226 for (i = 0; i < 2; i++) {
1227 cur = buf;
1228 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001229 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001230 if (li->type != types[i])
1231 continue;
1232 cur->Pid = cpu_to_le16(li->pid);
1233 cur->LengthLow = cpu_to_le32((u32)li->length);
1234 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1235 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1236 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1237 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001238 stored_rc = cifs_lockv(xid, tcon,
1239 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001240 (__u8)li->type, 0, num,
1241 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001242 if (stored_rc)
1243 rc = stored_rc;
1244 cur = buf;
1245 num = 0;
1246 } else
1247 cur++;
1248 }
1249
1250 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001251 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001252 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001253 if (stored_rc)
1254 rc = stored_rc;
1255 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001256 }
1257
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001258 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001259 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001260 return rc;
1261}
1262
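/*
 * Derive the 32-bit on-the-wire lock owner id from the VFS fl_owner_t.
 * XOR-ing with cifs_lock_secret (a random value generated at module
 * init) avoids leaking raw kernel pointer values to the server.
 */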
Jeff Layton3d224622016-05-24 06:27:44 -04001263static __u32
1264hash_lockowner(fl_owner_t owner)
1265{
1266 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1267}
1268
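/*
 * Staging copy of a POSIX lock, filled in under flc_lock so that the
 * actual push to the server can happen without holding that spinlock.
 */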
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001269struct lock_to_push {
1270 struct list_head llist;
1271 __u64 offset;
1272 __u64 length;
1273 __u32 pid;
1274 __u16 netfid;
1275 __u8 type;
1276};
1277
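/*
 * Push the inode's cached POSIX locks to the server. The lock_to_push
 * entries are preallocated because we cannot sleep in kmalloc while
 * walking the lock list under flc_lock.
 */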
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001278static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001279cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001280{
David Howells2b0143b2015-03-17 22:25:59 +00001281 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001282 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001283 struct file_lock *flock;
1284 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001285 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001286 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001287 struct list_head locks_to_send, *el;
1288 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001289 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001290
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001291 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001292
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001293 if (!flctx)
1294 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001295
Jeff Laytone084c1b2015-02-16 14:32:03 -05001296 spin_lock(&flctx->flc_lock);
1297 list_for_each(el, &flctx->flc_posix) {
1298 count++;
1299 }
1300 spin_unlock(&flctx->flc_lock);
1301
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001302 INIT_LIST_HEAD(&locks_to_send);
1303
1304 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001305 * Allocating count locks is enough because no FL_POSIX locks can be
1306 * added to the list while we hold cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001307 * protects the locking operations on this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001308 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001309 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001310 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1311 if (!lck) {
1312 rc = -ENOMEM;
1313 goto err_out;
1314 }
1315 list_add_tail(&lck->llist, &locks_to_send);
1316 }
1317
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001318 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001319 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001320 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001321 if (el == &locks_to_send) {
1322 /*
1323 * The list ended. We don't have enough allocated
1324 * structures - something is really wrong.
1325 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001326 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001327 break;
1328 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001329 length = 1 + flock->fl_end - flock->fl_start;
1330 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1331 type = CIFS_RDLCK;
1332 else
1333 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001334 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001335 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001336 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001337 lck->length = length;
1338 lck->type = type;
1339 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001340 }
Jeff Layton6109c852015-01-16 15:05:57 -05001341 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001342
1343 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001344 int stored_rc;
1345
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001346 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001347 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001348 lck->type, 0);
1349 if (stored_rc)
1350 rc = stored_rc;
1351 list_del(&lck->llist);
1352 kfree(lck);
1353 }
1354
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001355out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001356 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001357 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001358err_out:
1359 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1360 list_del(&lck->llist);
1361 kfree(lck);
1362 }
1363 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001364}
1365
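/*
 * Push any cached byte-range locks to the server - POSIX or mandatory
 * style, depending on the mount flags and server capabilities - and
 * mark the inode as no longer able to cache brlocks.
 */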
1366static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001367cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001368{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001369 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001370 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001371 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001372 int rc = 0;
1373
1374 /* we are going to update can_cache_brlcks here - need a write access */
Dave Wysochanskid46b0da2019-10-23 05:02:33 -04001375 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001376 if (!cinode->can_cache_brlcks) {
1377 up_write(&cinode->lock_sem);
1378 return rc;
1379 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001380
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001381 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001382 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1383 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001384 rc = cifs_push_posix_locks(cfile);
1385 else
1386 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001387
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001388 cinode->can_cache_brlcks = false;
1389 up_write(&cinode->lock_sem);
1390 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001391}
1392
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001393static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001394cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001395 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001397 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001398 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001399 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001400 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001401 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001402 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001403 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001405 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001406 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001407 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001408 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001409 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001410 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001411 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001412 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001414 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001415 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001416 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001417 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001418 *lock = 1;
1419 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001420 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001421 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001422 *unlock = 1;
1423 /* Check if unlock includes more than one lock range */
1424 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001425 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001426 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001427 *lock = 1;
1428 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001429 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001430 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001431 *lock = 1;
1432 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001433 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001434 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001435 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001437 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001438}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439
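/*
 * Handle an F_GETLK request: try the POSIX path first, then the locally
 * cached locks; as a last resort probe the server by acquiring (and
 * immediately releasing) a lock on the requested range.
 */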
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001440static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001441cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001442 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001443{
1444 int rc = 0;
1445 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001446 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1447 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001448 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001449 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001451 if (posix_lck) {
1452 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001453
1454 rc = cifs_posix_lock_test(file, flock);
1455 if (!rc)
1456 return rc;
1457
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001458 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001459 posix_lock_type = CIFS_RDLCK;
1460 else
1461 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001462 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1463 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001464 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001465 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 return rc;
1467 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001468
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001469 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001470 if (!rc)
1471 return rc;
1472
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001473 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001474 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1475 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001476 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001477 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1478 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001479 flock->fl_type = F_UNLCK;
1480 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001481 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1482 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001483 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001484 }
1485
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001486 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001487 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001488 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001489 }
1490
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001491 type &= ~server->vals->exclusive_lock_type;
1492
1493 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1494 type | server->vals->shared_lock_type,
1495 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001496 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001497 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1498 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001499 flock->fl_type = F_RDLCK;
1500 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001501 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1502 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001503 } else
1504 flock->fl_type = F_WRLCK;
1505
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001506 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001507}
1508
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001509void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001510cifs_move_llist(struct list_head *source, struct list_head *dest)
1511{
1512 struct list_head *li, *tmp;
1513 list_for_each_safe(li, tmp, source)
1514 list_move(li, dest);
1515}
1516
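/* Wake any waiters and free every lock entry on the list. */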
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001517void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001518cifs_free_llist(struct list_head *llist)
1519{
1520 struct cifsLockInfo *li, *tmp;
1521 list_for_each_entry_safe(li, tmp, llist, llist) {
1522 cifs_del_lock_waiters(li);
1523 list_del(&li->llist);
1524 kfree(li);
1525 }
1526}
1527
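/*
 * Send unlock requests for each cached lock wholly contained in the
 * range described by flock, batching ranges per on-wire request. Locks
 * are parked on a temporary list so they can be re-added if the server
 * rejects the unlock.
 */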
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001528int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001529cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1530 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001531{
1532 int rc = 0, stored_rc;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001533 static const int types[] = {
1534 LOCKING_ANDX_LARGE_FILES,
1535 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1536 };
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001537 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001538 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001539 LOCKING_ANDX_RANGE *buf, *cur;
1540 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001541 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001542 struct cifsLockInfo *li, *tmp;
1543 __u64 length = 1 + flock->fl_end - flock->fl_start;
1544 struct list_head tmp_llist;
1545
1546 INIT_LIST_HEAD(&tmp_llist);
1547
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001548 /*
1549 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001550 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001551 */
1552 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001553 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001554 return -EINVAL;
1555
Ross Lagerwall92a81092019-01-08 18:30:56 +00001556 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1557 PAGE_SIZE);
1558 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1559 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001560 max_num = (max_buf - sizeof(struct smb_hdr)) /
1561 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001562 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001563 if (!buf)
1564 return -ENOMEM;
1565
Dave Wysochanskid46b0da2019-10-23 05:02:33 -04001566 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001567 for (i = 0; i < 2; i++) {
1568 cur = buf;
1569 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001570 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001571 if (flock->fl_start > li->offset ||
1572 (flock->fl_start + length) <
1573 (li->offset + li->length))
1574 continue;
1575 if (current->tgid != li->pid)
1576 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001577 if (types[i] != li->type)
1578 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001579 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001580 /*
1581 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001582 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001583 */
1584 list_del(&li->llist);
1585 cifs_del_lock_waiters(li);
1586 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001587 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001588 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001589 cur->Pid = cpu_to_le16(li->pid);
1590 cur->LengthLow = cpu_to_le32((u32)li->length);
1591 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1592 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1593 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1594 /*
1595 * We need to save the lock here so that we can add it back to
1596 * the file's list if the unlock range request fails on
1597 * the server.
1598 */
1599 list_move(&li->llist, &tmp_llist);
1600 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001601 stored_rc = cifs_lockv(xid, tcon,
1602 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001603 li->type, num, 0, buf);
1604 if (stored_rc) {
1605 /*
1606 * We failed on the unlock range
1607 * request - add all locks from the tmp
1608 * list to the head of the file's list.
1609 */
1610 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001611 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001612 rc = stored_rc;
1613 } else
1614 /*
1615 * The unlock range request succeed -
1616 * free the tmp list.
1617 */
1618 cifs_free_llist(&tmp_llist);
1619 cur = buf;
1620 num = 0;
1621 } else
1622 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001623 }
1624 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001625 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001626 types[i], num, 0, buf);
1627 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001628 cifs_move_llist(&tmp_llist,
1629 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001630 rc = stored_rc;
1631 } else
1632 cifs_free_llist(&tmp_llist);
1633 }
1634 }
1635
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001636 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001637 kfree(buf);
1638 return rc;
1639}
1640
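/*
 * Apply a lock or unlock request: use the POSIX path when available,
 * otherwise record the lock locally (waiting out conflicts if asked to)
 * and send the mandatory-style request to the server. Errors from an
 * unlock at close time are ignored, since both cifs.ko and the server
 * drop locks on close anyway.
 */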
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001641static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001642cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001643 bool wait_flag, bool posix_lck, int lock, int unlock,
1644 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001645{
1646 int rc = 0;
1647 __u64 length = 1 + flock->fl_end - flock->fl_start;
1648 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1649 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001650 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001651 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001652
1653 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001654 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001655
1656 rc = cifs_posix_lock_set(file, flock);
1657 if (!rc || rc < 0)
1658 return rc;
1659
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001660 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001661 posix_lock_type = CIFS_RDLCK;
1662 else
1663 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001664
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001665 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001666 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001667
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001668 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001669 hash_lockowner(flock->fl_owner),
1670 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001671 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001672 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001673 }
1674
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001675 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001676 struct cifsLockInfo *lock;
1677
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001678 lock = cifs_lock_init(flock->fl_start, length, type,
1679 flock->fl_flags);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001680 if (!lock)
1681 return -ENOMEM;
1682
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001683 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001684 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001685 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001686 return rc;
1687 }
1688 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001689 goto out;
1690
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001691 /*
1692 * A Windows 7 server can delay breaking a lease from read to None
1693 * if we set a byte-range lock on a file - break it explicitly
1694 * before sending the lock to the server to be sure the next
1695 * read won't conflict with non-overlapped locks due to
1696 * page reading.
1697 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001698 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1699 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001700 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001701 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1702 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001703 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001704 }
1705
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001706 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1707 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001708 if (rc) {
1709 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001710 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001711 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001712
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001713 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001714 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001715 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001716
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001717out:
Steve Frenchd0677992019-07-16 18:55:38 -05001718 if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001719 /*
1720 * If this is a request to remove all locks because we
1721 * are closing the file, it doesn't matter if the
1722 * unlocking failed as both cifs.ko and the SMB server
1723 * remove the lock on file close
1724 */
1725 if (rc) {
1726 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1727 if (!(flock->fl_flags & FL_CLOSE))
1728 return rc;
1729 }
Benjamin Coddington4f656362015-10-22 13:38:14 -04001730 rc = locks_lock_file_wait(file, flock);
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001731 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001732 return rc;
1733}
1734
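/*
 * Entry point for flock(2)-style whole-file locks. For example, an
 * illustrative userspace call (not part of this file) such as
 *
 *	flock(fd, LOCK_EX);
 *
 * arrives here as an FL_FLOCK request; anything else is rejected with
 * -ENOLCK, and a request that is neither a lock nor an unlock returns
 * -EOPNOTSUPP.
 */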
Steve Frenchd0677992019-07-16 18:55:38 -05001735int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
1736{
1737 int rc, xid;
1738 int lock = 0, unlock = 0;
1739 bool wait_flag = false;
1740 bool posix_lck = false;
1741 struct cifs_sb_info *cifs_sb;
1742 struct cifs_tcon *tcon;
Steve Frenchd0677992019-07-16 18:55:38 -05001743 struct cifsFileInfo *cfile;
Steve Frenchd0677992019-07-16 18:55:38 -05001744 __u32 type;
1745
1746 rc = -EACCES;
1747 xid = get_xid();
1748
	if (!(fl->fl_flags & FL_FLOCK)) {
		free_xid(xid);
		return -ENOLCK;
	}
1751
1752 cfile = (struct cifsFileInfo *)file->private_data;
1753 tcon = tlink_tcon(cfile->tlink);
1754
1755 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
1756 tcon->ses->server);
1757 cifs_sb = CIFS_FILE_SB(file);
Steve Frenchd0677992019-07-16 18:55:38 -05001758
1759 if (cap_unix(tcon->ses) &&
1760 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1761 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1762 posix_lck = true;
1763
1764 if (!lock && !unlock) {
1765 /*
1766 * if there is no lock or unlock request then there is nothing
1767 * to do since we do not know what it is
1768 */
1769 free_xid(xid);
1770 return -EOPNOTSUPP;
1771 }
1772
1773 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
1774 xid);
1775 free_xid(xid);
1776 return rc;
1779}
1780
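/*
 * Entry point for POSIX byte-range locks via fcntl(2). For example, an
 * illustrative userspace call (not part of this file) such as
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 4096 };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * reaches this function as an FL_POSIX | FL_SLEEP request.
 */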
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001781int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1782{
1783 int rc, xid;
1784 int lock = 0, unlock = 0;
1785 bool wait_flag = false;
1786 bool posix_lck = false;
1787 struct cifs_sb_info *cifs_sb;
1788 struct cifs_tcon *tcon;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001789 struct cifsFileInfo *cfile;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001790 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001791
1792 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001793 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001794
Joe Perchesf96637b2013-05-04 22:12:25 -05001795 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1796 cmd, flock->fl_flags, flock->fl_type,
1797 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001798
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001799 cfile = (struct cifsFileInfo *)file->private_data;
1800 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001801
1802 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1803 tcon->ses->server);
Al Viro7119e222014-10-22 00:25:12 -04001804 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001805
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001806 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001807 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1808 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1809 posix_lck = true;
1810 /*
1811 * BB add code here to normalize offset and length to account for
1812 * negative length which we can not accept over the wire.
1813 */
1814 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001815 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001816 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001817 return rc;
1818 }
1819
1820 if (!lock && !unlock) {
1821 /*
1822 * if there is no lock or unlock request then there is nothing
1823 * to do since we do not know what it is
1824 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001825 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001826 return -EOPNOTSUPP;
1827 }
1828
1829 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1830 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001831 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 return rc;
1833}
1834
Jeff Layton597b0272012-03-23 14:40:56 -04001835/*
1836 * update the file size (if needed) after a write. Should be called with
1837 * the inode->i_lock held
1838 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001839void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001840cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1841 unsigned int bytes_written)
1842{
1843 loff_t end_of_write = offset + bytes_written;
1844
1845 if (end_of_write > cifsi->server_eof)
1846 cifsi->server_eof = end_of_write;
1847}
1848
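/*
 * Synchronously write write_size bytes from write_data to the file at
 * *offset, retrying on -EAGAIN and reopening an invalidated handle as
 * needed. Updates the cached server EOF and i_size, and returns the
 * number of bytes written or a negative error.
 */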
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001849static ssize_t
1850cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1851 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852{
1853 int rc = 0;
1854 unsigned int bytes_written = 0;
1855 unsigned int total_written;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001856 struct cifs_tcon *tcon;
1857 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001858 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001859 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001860 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001861 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862
Al Viro35c265e2014-08-19 20:25:34 -04001863 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1864 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001866 tcon = tlink_tcon(open_file->tlink);
1867 server = tcon->ses->server;
1868
1869 if (!server->ops->sync_write)
1870 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001871
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001872 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 for (total_written = 0; write_size > total_written;
1875 total_written += bytes_written) {
1876 rc = -EAGAIN;
1877 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001878 struct kvec iov[2];
1879 unsigned int len;
1880
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 /* we could deadlock if we called
1883 filemap_fdatawait from here, so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001884 cifs_reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001886 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 if (rc != 0)
1888 break;
1889 }
Steve French3e844692005-10-03 13:37:24 -07001890
David Howells2b0143b2015-03-17 22:25:59 +00001891 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001892 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001893 /* iov[0] is reserved for smb header */
1894 iov[1].iov_base = (char *)write_data + total_written;
1895 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001896 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001897 io_parms.tcon = tcon;
1898 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001899 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001900 rc = server->ops->sync_write(xid, &open_file->fid,
1901 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 }
1903 if (rc || (bytes_written == 0)) {
1904 if (total_written)
1905 break;
1906 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001907 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 return rc;
1909 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001910 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001911 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001912 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001913 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001914 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001915 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 }
1917
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001918 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919
Jeff Layton7da4b492010-10-15 15:34:00 -04001920 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001921 spin_lock(&d_inode(dentry)->i_lock);
1922 if (*offset > d_inode(dentry)->i_size)
1923 i_size_write(d_inode(dentry), *offset);
1924 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 }
David Howells2b0143b2015-03-17 22:25:59 +00001926 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001927 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 return total_written;
1929}
1930
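/*
 * Return a referenced open handle with read access for this inode, or
 * NULL if none is usable. On multiuser mounts only handles owned by
 * the caller's fsuid are considered.
 */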
Jeff Layton6508d902010-09-29 19:51:11 -04001931struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1932 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001933{
1934 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001935 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1936
1937 /* only filter by fsuid on multiuser mounts */
1938 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1939 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001940
Dave Wysochanskicb248812019-10-03 15:16:27 +10001941 spin_lock(&cifs_inode->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001942 /* we could simply take the first list entry, since write-only entries
1943 are always at the end of the list; but the first entry might have
1944 a close pending, so we go through the whole list */
1945 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001946 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001947 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001948 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001949 if (!open_file->invalidHandle) {
1950 /* found a good file */
1951 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001952 cifsFileInfo_get(open_file);
Dave Wysochanskicb248812019-10-03 15:16:27 +10001953 spin_unlock(&cifs_inode->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001954 return open_file;
1955 } /* else might as well continue, and look for
1956 another, or simply have the caller reopen it
1957 again rather than trying to fix this handle */
1958 } else /* write only file */
1959 break; /* write only files are last so must be done */
1960 }
Dave Wysochanskicb248812019-10-03 15:16:27 +10001961 spin_unlock(&cifs_inode->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001962 return NULL;
1963}
Steve French630f3f0c2007-10-25 21:17:17 +00001964
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001965/* Return -EBADF if no handle is found and general rc otherwise */
1966int
1967cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
1968 struct cifsFileInfo **ret_file)
Steve French6148a742005-10-05 12:23:19 -07001969{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001970 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001971 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001972 bool any_available = false;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001973 int rc = -EBADF;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001974 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001975
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001976 *ret_file = NULL;
1977
1978 /*
1979 * Having a null inode here (because mapping->host was set to zero by
1980 * the VFS or MM) should not happen, but we had reports of an oops (due
1981 * to it being zero) during stress test cases, so we need to check for it
1982 */
Steve French60808232006-04-22 15:53:05 +00001983
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001984 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001985 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001986 dump_stack();
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001987 return rc;
Steve French60808232006-04-22 15:53:05 +00001988 }
1989
Jeff Laytond3892292010-11-02 16:22:50 -04001990 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1991
Jeff Layton6508d902010-09-29 19:51:11 -04001992 /* only filter by fsuid on multiuser mounts */
1993 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1994 fsuid_only = false;
1995
Dave Wysochanskicb248812019-10-03 15:16:27 +10001996 spin_lock(&cifs_inode->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001997refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001998 if (refind > MAX_REOPEN_ATT) {
Dave Wysochanskicb248812019-10-03 15:16:27 +10001999 spin_unlock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002000 return rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002001 }
Steve French6148a742005-10-05 12:23:19 -07002002 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04002003 if (!any_available && open_file->pid != current->tgid)
2004 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08002005 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04002006 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04002007 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00002008 if (!open_file->invalidHandle) {
2009 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05002010 cifsFileInfo_get(open_file);
Dave Wysochanskicb248812019-10-03 15:16:27 +10002011 spin_unlock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002012 *ret_file = open_file;
2013 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002014 } else {
2015 if (!inv_file)
2016 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00002017 }
Steve French6148a742005-10-05 12:23:19 -07002018 }
2019 }
Jeff Layton2846d382008-09-22 21:33:33 -04002020 /* couldn't find a usable FH with the same pid, try any available */
2021 if (!any_available) {
2022 any_available = true;
2023 goto refind_writable;
2024 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002025
2026 if (inv_file) {
2027 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05002028 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002029 }
2030
Dave Wysochanskicb248812019-10-03 15:16:27 +10002031 spin_unlock(&cifs_inode->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002032
2033 if (inv_file) {
2034 rc = cifs_reopen_file(inv_file, false);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002035 if (!rc) {
2036 *ret_file = inv_file;
2037 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002038 }
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002039
Ronnie Sahlberg487317c2019-06-05 10:38:38 +10002040 spin_lock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002041 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
Ronnie Sahlberg487317c2019-06-05 10:38:38 +10002042 spin_unlock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002043 cifsFileInfo_put(inv_file);
2044 ++refind;
2045 inv_file = NULL;
Dave Wysochanskicb248812019-10-03 15:16:27 +10002046 spin_lock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002047 goto refind_writable;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002048 }
2049
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002050 return rc;
2051}
2052
2053struct cifsFileInfo *
2054find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
2055{
2056 struct cifsFileInfo *cfile;
2057 int rc;
2058
2059 rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
2060 if (rc)
2061 cifs_dbg(FYI, "couldn't find writable handle rc=%d\n", rc);
2062
2063 return cfile;
Steve French6148a742005-10-05 12:23:19 -07002064}
2065
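/*
 * Look up an open, writable handle by tcon-relative path name rather
 * than by inode. Returns 0 and a referenced handle in *ret_file, or a
 * negative error.
 */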
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002066int
2067cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2068 struct cifsFileInfo **ret_file)
2069{
2070 struct list_head *tmp;
2071 struct cifsFileInfo *cfile;
2072 struct cifsInodeInfo *cinode;
2073 char *full_path;
2074
2075 *ret_file = NULL;
2076
2077 spin_lock(&tcon->open_file_lock);
2078 list_for_each(tmp, &tcon->openFileList) {
2079 cfile = list_entry(tmp, struct cifsFileInfo,
2080 tlist);
2081 full_path = build_path_from_dentry(cfile->dentry);
2082 if (full_path == NULL) {
2083 spin_unlock(&tcon->open_file_lock);
2084 return -ENOMEM;
2085 }
2086 if (strcmp(full_path, name)) {
2087 kfree(full_path);
2088 continue;
2089 }
2090
2091 kfree(full_path);
2092 cinode = CIFS_I(d_inode(cfile->dentry));
2093 spin_unlock(&tcon->open_file_lock);
2094 return cifs_get_writable_file(cinode, 0, ret_file);
2095 }
2096
2097 spin_unlock(&tcon->open_file_lock);
2098 return -ENOENT;
2099}
2100
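/*
 * Like cifs_get_writable_path() above, but look up a handle opened for
 * reading.
 */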
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002101int
2102cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2103 struct cifsFileInfo **ret_file)
2104{
2105 struct list_head *tmp;
2106 struct cifsFileInfo *cfile;
2107 struct cifsInodeInfo *cinode;
2108 char *full_path;
2109
2110 *ret_file = NULL;
2111
2112 spin_lock(&tcon->open_file_lock);
2113 list_for_each(tmp, &tcon->openFileList) {
2114 cfile = list_entry(tmp, struct cifsFileInfo,
2115 tlist);
2116 full_path = build_path_from_dentry(cfile->dentry);
2117 if (full_path == NULL) {
2118 spin_unlock(&tcon->open_file_lock);
2119 return -ENOMEM;
2120 }
2121 if (strcmp(full_path, name)) {
2122 kfree(full_path);
2123 continue;
2124 }
2125
2126 kfree(full_path);
2127 cinode = CIFS_I(d_inode(cfile->dentry));
2128 spin_unlock(&tcon->open_file_lock);
2129 *ret_file = find_readable_file(cinode, 0);
2130 return *ret_file ? 0 : -ENOENT;
2131 }
2132
2133 spin_unlock(&tcon->open_file_lock);
2134 return -ENOENT;
2135}
2136
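/*
 * Write back the byte range [from, to) of a cached page through any
 * writable open handle for the inode, without extending the file.
 */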
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
2138{
2139 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002140 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 char *write_data;
2142 int rc = -EFAULT;
2143 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07002145 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146
2147 if (!mapping || !mapping->host)
2148 return -EFAULT;
2149
2150 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
2152 offset += (loff_t)from;
2153 write_data = kmap(page);
2154 write_data += from;
2155
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002156 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 kunmap(page);
2158 return -EIO;
2159 }
2160
2161 /* racing with truncate? */
2162 if (offset > mapping->host->i_size) {
2163 kunmap(page);
2164 return 0; /* don't care */
2165 }
2166
2167 /* check to make sure that we are not extending the file */
2168 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002169 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002171 rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
2172 if (!rc) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002173 bytes_written = cifs_write(open_file, open_file->pid,
2174 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04002175 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07002177 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002178 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07002179 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002180 else if (bytes_written < 0)
2181 rc = bytes_written;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002182 else
2183 rc = -EFAULT;
Steve French6148a742005-10-05 12:23:19 -07002184 } else {
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002185 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2186 if (!is_retryable_error(rc))
2187 rc = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 }
2189
2190 kunmap(page);
2191 return rc;
2192}
2193
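/*
 * Allocate a cifs_writedata and fill it with up to tofind dirty pages
 * tagged for writeback, starting the search at *index.
 */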
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002194static struct cifs_writedata *
2195wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2196 pgoff_t end, pgoff_t *index,
2197 unsigned int *found_pages)
2198{
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002199 struct cifs_writedata *wdata;
2200
2201 wdata = cifs_writedata_alloc((unsigned int)tofind,
2202 cifs_writev_complete);
2203 if (!wdata)
2204 return NULL;
2205
Jan Kara9c19a9c2017-11-15 17:35:26 -08002206 *found_pages = find_get_pages_range_tag(mapping, index, end,
2207 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002208 return wdata;
2209}
2210
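/*
 * Lock and claim a run of consecutive dirty pages from those found
 * above, marking each for writeback; stop early at gaps, pages already
 * under writeback, the range end, or EOF. Unused pages are released.
 * Returns the number of pages kept.
 */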
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002211static unsigned int
2212wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2213 struct address_space *mapping,
2214 struct writeback_control *wbc,
2215 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2216{
2217 unsigned int nr_pages = 0, i;
2218 struct page *page;
2219
2220 for (i = 0; i < found_pages; i++) {
2221 page = wdata->pages[i];
2222 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002223 * At this point we hold neither the i_pages lock nor the
2224 * page lock: the page may be truncated or invalidated
2225 * (changing page->mapping to NULL), or even swizzled
2226 * back from swapper_space to tmpfs file mapping
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002227 */
2228
2229 if (nr_pages == 0)
2230 lock_page(page);
2231 else if (!trylock_page(page))
2232 break;
2233
2234 if (unlikely(page->mapping != mapping)) {
2235 unlock_page(page);
2236 break;
2237 }
2238
2239 if (!wbc->range_cyclic && page->index > end) {
2240 *done = true;
2241 unlock_page(page);
2242 break;
2243 }
2244
2245 if (*next && (page->index != *next)) {
2246 /* Not next consecutive page */
2247 unlock_page(page);
2248 break;
2249 }
2250
2251 if (wbc->sync_mode != WB_SYNC_NONE)
2252 wait_on_page_writeback(page);
2253
2254 if (PageWriteback(page) ||
2255 !clear_page_dirty_for_io(page)) {
2256 unlock_page(page);
2257 break;
2258 }
2259
2260 /*
2261 * This actually clears the dirty bit in the radix tree.
2262 * See cifs_writepage() for more commentary.
2263 */
2264 set_page_writeback(page);
2265 if (page_offset(page) >= i_size_read(mapping->host)) {
2266 *done = true;
2267 unlock_page(page);
2268 end_page_writeback(page);
2269 break;
2270 }
2271
2272 wdata->pages[i] = page;
2273 *next = page->index + 1;
2274 ++nr_pages;
2275 }
2276
2277 /* reset index to refind any pages skipped */
2278 if (nr_pages == 0)
2279 *index = wdata->pages[0]->index + 1;
2280
2281 /* put any pages we aren't going to use */
2282 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002283 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002284 wdata->pages[i] = NULL;
2285 }
2286
2287 return nr_pages;
2288}
2289
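/*
 * Fill in the write geometry (offset, page count, tail size) for the
 * prepared pages, adjust the server credits, and issue the asynchronous
 * write.
 */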
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002290static int
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002291wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2292 struct address_space *mapping, struct writeback_control *wbc)
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002293{
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002294 int rc;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002295 struct TCP_Server_Info *server =
2296 tlink_tcon(wdata->cfile->tlink)->ses->server;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002297
2298 wdata->sync_mode = wbc->sync_mode;
2299 wdata->nr_pages = nr_pages;
2300 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002301 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002302 wdata->tailsz = min(i_size_read(mapping->host) -
2303 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002304 (loff_t)PAGE_SIZE);
2305 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002306 wdata->pid = wdata->cfile->pid;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002307
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002308 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2309 if (rc)
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002310 return rc;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002311
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002312 if (wdata->cfile->invalidHandle)
2313 rc = -EAGAIN;
2314 else
2315 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002316
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002317 return rc;
2318}
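
/*
 * Editor's illustration, not part of fs/cifs/file.c: the tailsz/bytes
 * arithmetic in wdata_send_pages(), checked in userspace. Assumes a
 * hypothetical 4 KiB page size; only the last page is partially valid,
 * so the pieces must add back up to the file size.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096ULL

int main(void)
{
	uint64_t i_size = 10000;			/* end of file */
	unsigned int nr_pages = 3;			/* covers 0..12287 */
	uint64_t last_off = (uint64_t)(nr_pages - 1) * DEMO_PAGE_SIZE;
	uint64_t tailsz = i_size - last_off;		/* 1808 valid bytes */

	if (tailsz > DEMO_PAGE_SIZE)
		tailsz = DEMO_PAGE_SIZE;		/* min(..., PAGE_SIZE) */
	assert((nr_pages - 1) * DEMO_PAGE_SIZE + tailsz == i_size);
	return 0;
}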
2319
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002321 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322{
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002323 struct inode *inode = mapping->host;
2324 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002325 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002326 bool done = false, scanned = false, range_whole = false;
2327 pgoff_t end, index;
2328 struct cifs_writedata *wdata;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002329 struct cifsFileInfo *cfile = NULL;
Steve French37c0eb42005-10-05 14:50:29 -07002330 int rc = 0;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002331 int saved_rc = 0;
Steve French0cb012d2018-10-11 01:01:02 -05002332 unsigned int xid;
Steve French50c2f752007-07-13 00:33:32 +00002333
Steve French37c0eb42005-10-05 14:50:29 -07002334 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002335 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002336 * one page at a time via cifs_writepage
2337 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002338 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002339 return generic_writepages(mapping, wbc);
2340
Steve French0cb012d2018-10-11 01:01:02 -05002341 xid = get_xid();
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002342 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002343 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002344 end = -1;
2345 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002346 index = wbc->range_start >> PAGE_SHIFT;
2347 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002348 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002349 range_whole = true;
2350 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002351 }
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002352 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
Steve French37c0eb42005-10-05 14:50:29 -07002353retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002354 while (!done && index <= end) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002355 unsigned int i, nr_pages, found_pages, wsize;
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002356 pgoff_t next = 0, tofind, saved_index = index;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002357 struct cifs_credits credits_on_stack;
2358 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002359 int get_file_rc = 0;
Steve French37c0eb42005-10-05 14:50:29 -07002360
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002361 if (cfile)
2362 cifsFileInfo_put(cfile);
2363
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002364 rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
2365
2366 /* in case of an error store it to return later */
2367 if (rc)
2368 get_file_rc = rc;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002369
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002370 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002371 &wsize, credits);
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002372 if (rc != 0) {
2373 done = true;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002374 break;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002375 }
Steve French37c0eb42005-10-05 14:50:29 -07002376
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002377 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07002378
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002379 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2380 &found_pages);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002381 if (!wdata) {
2382 rc = -ENOMEM;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002383 done = true;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002384 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002385 break;
2386 }
2387
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002388 if (found_pages == 0) {
2389 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002390 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002391 break;
2392 }
2393
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002394 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2395 end, &index, &next, &done);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002396
2397 /* nothing to write? */
2398 if (nr_pages == 0) {
2399 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002400 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002401 continue;
2402 }
2403
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002404 wdata->credits = credits_on_stack;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002405 wdata->cfile = cfile;
2406 cfile = NULL;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002407
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002408 if (!wdata->cfile) {
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002409 cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
2410 get_file_rc);
2411 if (is_retryable_error(get_file_rc))
2412 rc = get_file_rc;
2413 else
2414 rc = -EBADF;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002415 } else
2416 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
Jeff Layton941b8532011-01-11 07:24:01 -05002417
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002418 for (i = 0; i < nr_pages; ++i)
2419 unlock_page(wdata->pages[i]);
2420
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002421 /* send failure -- clean up the mess */
2422 if (rc != 0) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002423 add_credits_and_wake_if(server, &wdata->credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002424 for (i = 0; i < nr_pages; ++i) {
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002425 if (is_retryable_error(rc))
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002426 redirty_page_for_writepage(wbc,
2427 wdata->pages[i]);
2428 else
2429 SetPageError(wdata->pages[i]);
2430 end_page_writeback(wdata->pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002431 put_page(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002432 }
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002433 if (!is_retryable_error(rc))
Jeff Layton941b8532011-01-11 07:24:01 -05002434 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002435 }
2436 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002437
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002438 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2439 index = saved_index;
2440 continue;
2441 }
2442
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002443 /* Return immediately if we received a signal during writing */
2444 if (is_interrupt_error(rc)) {
2445 done = true;
2446 break;
2447 }
2448
2449 if (rc != 0 && saved_rc == 0)
2450 saved_rc = rc;
2451
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002452 wbc->nr_to_write -= nr_pages;
2453 if (wbc->nr_to_write <= 0)
2454 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002455
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002456 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002457 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002458
Steve French37c0eb42005-10-05 14:50:29 -07002459 if (!scanned && !done) {
2460 /*
2461 * We hit the last page and there is more work to be done: wrap
2462 * back to the start of the file
2463 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002464 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002465 index = 0;
2466 goto retry;
2467 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002468
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002469 if (saved_rc != 0)
2470 rc = saved_rc;
2471
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002472 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002473 mapping->writeback_index = index;
2474
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002475 if (cfile)
2476 cifsFileInfo_put(cfile);
Steve French0cb012d2018-10-11 01:01:02 -05002477 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 return rc;
2479}
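
/*
 * Editor's illustration, not part of fs/cifs/file.c: the range_cyclic scan
 * shape of cifs_writepages() in miniature -- resume from a remembered index,
 * and if the end is reached with work left, wrap once back to the start
 * (the "scanned" guard prevents a second wrap). All names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool dirty[8] = { true, false, true, false, false, true, false, false };
	unsigned int index = 5, end = 7;	/* resume mid-range */
	bool scanned = false, done = false;

retry:
	while (!done && index <= end) {
		if (dirty[index]) {
			printf("write page %u\n", index);
			dirty[index] = false;
		}
		index++;
	}
	if (!scanned && !done) {
		scanned = true;	/* wrap back once, like range_cyclic */
		index = 0;
		goto retry;
	}
	return 0;
}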
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002481static int
2482cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002484 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002485 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002487 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488/* BB add check for wbc flags */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002489 get_page(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002490 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002491 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002492
2493 /*
2494 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2495 *
2496 * A writepage() implementation always needs to do either this,
2497 * or re-dirty the page with "redirty_page_for_writepage()" in
2498 * the case of a failure.
2499 *
2500 * Just unlocking the page will cause the radix tree tag-bits
2501 * to fail to update with the state of the page correctly.
2502 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002503 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002504retry_write:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002505 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002506 if (is_retryable_error(rc)) {
2507 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
Jeff Layton97b37f22017-05-25 06:59:52 -04002508 goto retry_write;
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002509 redirty_page_for_writepage(wbc, page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002510 } else if (rc != 0) {
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002511 SetPageError(page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002512 mapping_set_error(page->mapping, rc);
2513 } else {
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002514 SetPageUptodate(page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002515 }
Linus Torvaldscb876f42006-12-23 16:19:07 -08002516 end_page_writeback(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002517 put_page(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002518 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 return rc;
2520}
2521
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002522static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2523{
2524 int rc = cifs_writepage_locked(page, wbc);
2525 unlock_page(page);
2526 return rc;
2527}
2528
Nick Piggind9414772008-09-24 11:32:59 -04002529static int cifs_write_end(struct file *file, struct address_space *mapping,
2530 loff_t pos, unsigned len, unsigned copied,
2531 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532{
Nick Piggind9414772008-09-24 11:32:59 -04002533 int rc;
2534 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002535 struct cifsFileInfo *cfile = file->private_data;
2536 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2537 __u32 pid;
2538
2539 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2540 pid = cfile->pid;
2541 else
2542 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
Joe Perchesf96637b2013-05-04 22:12:25 -05002544 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002545 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002546
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002547 if (PageChecked(page)) {
2548 if (copied == len)
2549 SetPageUptodate(page);
2550 ClearPageChecked(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002551 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002552 SetPageUptodate(page);
2553
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002555 char *page_data;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002556 unsigned offset = pos & (PAGE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002557 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002558
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002559 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560 /* this is probably better than directly calling
2561 partialpage_write since in this function the file handle is
2562 known, which we might as well leverage */
2563 /* BB check if anything else missing out of ppw
2564 such as updating last write time */
2565 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002566 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002567 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002569
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002570 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002571 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002572 rc = copied;
2573 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002574 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 }
2576
Nick Piggind9414772008-09-24 11:32:59 -04002577 if (rc > 0) {
2578 spin_lock(&inode->i_lock);
2579 if (pos > inode->i_size)
2580 i_size_write(inode, pos);
2581 spin_unlock(&inode->i_lock);
2582 }
2583
2584 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002585 put_page(page);
Nick Piggind9414772008-09-24 11:32:59 -04002586
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 return rc;
2588}
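
/*
 * Editor's illustration, not part of fs/cifs/file.c: the grow-only size
 * update done above under inode->i_lock -- the compare and the store happen
 * under one lock, so racing writers can only move the size forward, never
 * shrink it. POSIX-mutex sketch with hypothetical names.
 */
#include <pthread.h>

static pthread_mutex_t demo_i_lock = PTHREAD_MUTEX_INITIALIZER;
static long long demo_i_size;

static void demo_update_eof(long long pos)
{
	pthread_mutex_lock(&demo_i_lock);
	if (pos > demo_i_size)
		demo_i_size = pos;	/* like i_size_write() */
	pthread_mutex_unlock(&demo_i_lock);
}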
2589
Josef Bacik02c24a82011-07-16 20:44:56 -04002590int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2591 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002593 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002595 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002596 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002597 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002598 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002599 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600
Jeff Layton3b49c9a2017-07-07 15:20:52 -04002601 rc = file_write_and_wait_range(file, start, end);
Josef Bacik02c24a82011-07-16 20:44:56 -04002602 if (rc)
2603 return rc;
Josef Bacik02c24a82011-07-16 20:44:56 -04002604
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002605 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606
Al Viro35c265e2014-08-19 20:25:34 -04002607 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2608 file, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002609
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002610 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002611 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002612 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002613 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002614 rc = 0; /* don't care about it in fsync */
2615 }
2616 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002617
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002618 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002619 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2620 server = tcon->ses->server;
2621 if (server->ops->flush)
2622 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2623 else
2624 rc = -ENOSYS;
2625 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002626
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002627 free_xid(xid);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002628 return rc;
2629}
2630
Josef Bacik02c24a82011-07-16 20:44:56 -04002631int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002632{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002633 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002634 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002635 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002636 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002637 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002638 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002639
Jeff Layton3b49c9a2017-07-07 15:20:52 -04002640 rc = file_write_and_wait_range(file, start, end);
Josef Bacik02c24a82011-07-16 20:44:56 -04002641 if (rc)
2642 return rc;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002643
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002644 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002645
Al Viro35c265e2014-08-19 20:25:34 -04002646 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2647 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002648
2649 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002650 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2651 server = tcon->ses->server;
2652 if (server->ops->flush)
2653 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2654 else
2655 rc = -ENOSYS;
2656 }
Steve Frenchb298f222009-02-21 21:17:43 +00002657
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002658 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 return rc;
2660}
2661
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662/*
2663 * As the file closes, flush all cached write data for this inode,
2664 * checking for write-behind errors.
2665 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002666int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667{
Al Viro496ad9a2013-01-23 17:07:38 -05002668 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 int rc = 0;
2670
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002671 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002672 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002673
Joe Perchesf96637b2013-05-04 22:12:25 -05002674 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675
2676 return rc;
2677}
2678
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002679static int
2680cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2681{
2682 int rc = 0;
2683 unsigned long i;
2684
2685 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002686 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002687 if (!pages[i]) {
2688 /*
2689 * save number of pages we have already allocated and
2690 * return with ENOMEM error
2691 */
2692 num_pages = i;
2693 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002694 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002695 }
2696 }
2697
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002698 if (rc) {
2699 for (i = 0; i < num_pages; i++)
2700 put_page(pages[i]);
2701 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002702 return rc;
2703}
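
/*
 * Editor's illustration, not part of fs/cifs/file.c: the all-or-nothing
 * allocation pattern of cifs_write_allocate_pages() -- on the first failure,
 * release everything allocated so far and fail the whole request. Userspace
 * sketch with malloc/free standing in for alloc_page()/put_page().
 */
#include <stdlib.h>

static int alloc_all(void **bufs, unsigned long num, size_t sz)
{
	unsigned long i;

	for (i = 0; i < num; i++) {
		bufs[i] = malloc(sz);
		if (!bufs[i]) {
			while (i--)
				free(bufs[i]);	/* roll back partial work */
			return -1;	/* ENOMEM-style failure */
		}
	}
	return 0;
}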
2704
2705static inline
2706size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2707{
2708 size_t num_pages;
2709 size_t clen;
2710
2711 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002712 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002713
2714 if (cur_len)
2715 *cur_len = clen;
2716
2717 return num_pages;
2718}
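
/*
 * Editor's illustration, not part of fs/cifs/file.c: get_numpages() worked
 * through with a hypothetical 4 KiB page and 64 KiB wsize -- the chunk
 * length is capped at wsize and the page count is a round-up division.
 */
#include <assert.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	size_t wsize = 65536, len = 100000;
	size_t clen = len < wsize ? len : wsize;	/* capped: 65536 */
	size_t num_pages = DEMO_DIV_ROUND_UP(clen, DEMO_PAGE_SIZE);

	assert(clen == 65536 && num_pages == 16);
	return 0;
}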
2719
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002720static void
Steve French4a5c80d2014-02-07 20:45:12 -06002721cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002722{
2723 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002724 struct cifs_writedata *wdata = container_of(refcount,
2725 struct cifs_writedata, refcount);
2726
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002727 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
Steve French4a5c80d2014-02-07 20:45:12 -06002728 for (i = 0; i < wdata->nr_pages; i++)
2729 put_page(wdata->pages[i]);
2730 cifs_writedata_release(refcount);
2731}
2732
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002733static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2734
Steve French4a5c80d2014-02-07 20:45:12 -06002735static void
2736cifs_uncached_writev_complete(struct work_struct *work)
2737{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002738 struct cifs_writedata *wdata = container_of(work,
2739 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002740 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002741 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2742
2743 spin_lock(&inode->i_lock);
2744 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2745 if (cifsi->server_eof > inode->i_size)
2746 i_size_write(inode, cifsi->server_eof);
2747 spin_unlock(&inode->i_lock);
2748
2749 complete(&wdata->done);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002750 collect_uncached_write_data(wdata->ctx);
2751 /* the call below can possibly free the last ref to the aio ctx */
Steve French4a5c80d2014-02-07 20:45:12 -06002752 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002753}
2754
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002755static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002756wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2757 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002758{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002759 size_t save_len, copied, bytes, cur_len = *len;
2760 unsigned long i, nr_pages = *num_pages;
2761
2762 save_len = cur_len;
2763 for (i = 0; i < nr_pages; i++) {
2764 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2765 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2766 cur_len -= copied;
2767 /*
2768 * If we didn't copy as much as we expected, then that
2769 * may mean we trod into an unmapped area. Stop copying
2770 * at that point. On the next pass through the big
2771 * loop, we'll likely end up getting a zero-length
2772 * write and bailing out of it.
2773 */
2774 if (copied < bytes)
2775 break;
2776 }
2777 cur_len = save_len - cur_len;
2778 *len = cur_len;
2779
2780 /*
2781 * If we have no data to send, then that probably means that
2782 * the copy above failed altogether. That's most likely because
2783 * the address in the iovec was bogus. Return -EFAULT and let
2784 * the caller free anything we allocated and bail out.
2785 */
2786 if (!cur_len)
2787 return -EFAULT;
2788
2789 /*
2790 * i + 1 now represents the number of pages we actually used in
2791 * the copy phase above.
2792 */
2793 *num_pages = i + 1;
2794 return 0;
2795}
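
/*
 * Editor's illustration, not part of fs/cifs/file.c: the save_len/cur_len
 * accounting in wdata_fill_from_iovec(). A short copy stops the loop; the
 * difference is how many bytes really landed, and zero progress is what
 * maps to -EFAULT above. demo_copy() is a hypothetical stand-in for
 * copy_page_from_iter(), which may return less than asked.
 */
#include <assert.h>
#include <stddef.h>

static size_t demo_copy(size_t want, size_t available)
{
	return want < available ? want : available;
}

int main(void)
{
	size_t cur_len = 10000, save_len = cur_len, avail = 6000;

	for (int i = 0; i < 3 && cur_len; i++) {
		size_t bytes = cur_len < 4096 ? cur_len : 4096;
		size_t copied = demo_copy(bytes, avail);

		avail -= copied;
		cur_len -= copied;
		if (copied < bytes)
			break;	/* trod into an "unmapped" area */
	}
	assert(save_len - cur_len == 6000);	/* bytes actually copied */
	return 0;
}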
2796
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002797static int
Long Li8c5f9c12018-10-31 22:13:10 +00002798cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
2799 struct cifs_aio_ctx *ctx)
2800{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002801 unsigned int wsize;
2802 struct cifs_credits credits;
Long Li8c5f9c12018-10-31 22:13:10 +00002803 int rc;
2804 struct TCP_Server_Info *server =
2805 tlink_tcon(wdata->cfile->tlink)->ses->server;
2806
Long Li8c5f9c12018-10-31 22:13:10 +00002807 do {
Long Lid53e2922019-03-15 07:54:59 +00002808 if (wdata->cfile->invalidHandle) {
Long Li8c5f9c12018-10-31 22:13:10 +00002809 rc = cifs_reopen_file(wdata->cfile, false);
Long Lid53e2922019-03-15 07:54:59 +00002810 if (rc == -EAGAIN)
2811 continue;
2812 else if (rc)
2813 break;
2814 }
2815
2816
2817 /*
2818 * Wait for credits to resend this wdata.
2819 * Note: we are attempting to resend the whole wdata
2820 * rather than in segments.
2821 */
2822 do {
2823 rc = server->ops->wait_mtu_credits(server, wdata->bytes,
2824 &wsize, &credits);
2825 if (rc)
2826 goto fail;
2827
2828 if (wsize < wdata->bytes) {
2829 add_credits_and_wake_if(server, &credits, 0);
2830 msleep(1000);
2831 }
2832 } while (wsize < wdata->bytes);
2833 wdata->credits = credits;
2834
2835 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2836
2837 if (!rc) {
2838 if (wdata->cfile->invalidHandle)
2839 rc = -EAGAIN;
Long Lib7a55bb2019-10-15 22:54:50 +00002840 else {
2841#ifdef CONFIG_CIFS_SMB_DIRECT
2842 if (wdata->mr) {
2843 wdata->mr->need_invalidate = true;
2844 smbd_deregister_mr(wdata->mr);
2845 wdata->mr = NULL;
2846 }
2847#endif
Long Lid53e2922019-03-15 07:54:59 +00002848 rc = server->ops->async_writev(wdata,
Long Li8c5f9c12018-10-31 22:13:10 +00002849 cifs_uncached_writedata_release);
Long Lib7a55bb2019-10-15 22:54:50 +00002850 }
Long Lid53e2922019-03-15 07:54:59 +00002851 }
Long Li8c5f9c12018-10-31 22:13:10 +00002852
Long Lid53e2922019-03-15 07:54:59 +00002853 /* If the write was successfully sent, we are done */
2854 if (!rc) {
2855 list_add_tail(&wdata->list, wdata_list);
2856 return 0;
2857 }
Long Li8c5f9c12018-10-31 22:13:10 +00002858
Long Lid53e2922019-03-15 07:54:59 +00002859 /* Roll back credits and retry if needed */
2860 add_credits_and_wake_if(server, &wdata->credits, 0);
2861 } while (rc == -EAGAIN);
2862
2863fail:
Long Li8c5f9c12018-10-31 22:13:10 +00002864 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Long Li8c5f9c12018-10-31 22:13:10 +00002865 return rc;
2866}
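
/*
 * Editor's illustration, not part of fs/cifs/file.c: the credit wait loop
 * in cifs_resend_wdata() -- keep asking until the grant covers the whole
 * resend, giving partial grants back and backing off before retrying.
 * demo_get_credits()/demo_put_credits() are hypothetical stand-ins for
 * wait_mtu_credits()/add_credits_and_wake_if(); like the kernel loop, this
 * spins until enough credits are available.
 */
#include <assert.h>
#include <unistd.h>

static unsigned int pool = 16;	/* pretend server credit pool */

static unsigned int demo_get_credits(unsigned int want)
{
	unsigned int got = want < pool ? want : pool;

	pool -= got;
	return got;
}

static void demo_put_credits(unsigned int got)
{
	pool += got;
}

static unsigned int wait_for_full_grant(unsigned int need)
{
	unsigned int got;

	do {
		got = demo_get_credits(need);
		if (got < need) {
			demo_put_credits(got);	/* roll back partial grant */
			sleep(1);		/* like msleep(1000) */
		}
	} while (got < need);
	return got;
}

int main(void)
{
	assert(wait_for_full_grant(8) == 8);
	return 0;
}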
2867
2868static int
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002869cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2870 struct cifsFileInfo *open_file,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002871 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
2872 struct cifs_aio_ctx *ctx)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002873{
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002874 int rc = 0;
2875 size_t cur_len;
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002876 unsigned long nr_pages, num_pages, i;
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002877 struct cifs_writedata *wdata;
Al Virofc56b982016-09-21 18:18:23 -04002878 struct iov_iter saved_from = *from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002879 loff_t saved_offset = offset;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002880 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002881 struct TCP_Server_Info *server;
Long Li8c5f9c12018-10-31 22:13:10 +00002882 struct page **pagevec;
2883 size_t start;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002884 unsigned int xid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002885
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002886 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2887 pid = open_file->pid;
2888 else
2889 pid = current->tgid;
2890
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002891 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002892 xid = get_xid();
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002893
2894 do {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002895 unsigned int wsize;
2896 struct cifs_credits credits_on_stack;
2897 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002898
Pavel Shilovsky3e952992019-01-25 11:59:01 -08002899 if (open_file->invalidHandle) {
2900 rc = cifs_reopen_file(open_file, false);
2901 if (rc == -EAGAIN)
2902 continue;
2903 else if (rc)
2904 break;
2905 }
2906
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002907 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002908 &wsize, credits);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002909 if (rc)
2910 break;
2911
Long Lib6bc8a72018-12-16 23:17:04 +00002912 cur_len = min_t(const size_t, len, wsize);
2913
Long Li8c5f9c12018-10-31 22:13:10 +00002914 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05002915 ssize_t result;
2916
2917 result = iov_iter_get_pages_alloc(
Long Lib6bc8a72018-12-16 23:17:04 +00002918 from, &pagevec, cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05002919 if (result < 0) {
Long Li8c5f9c12018-10-31 22:13:10 +00002920 cifs_dbg(VFS,
2921 "direct_writev couldn't get user pages "
2922 "(rc=%zd) iter type %d iov_offset %zd "
2923 "count %zd\n",
Steve Frenchb98e26d2018-11-01 10:54:32 -05002924 result, from->type,
Long Li8c5f9c12018-10-31 22:13:10 +00002925 from->iov_offset, from->count);
2926 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00002927
2928 rc = result;
2929 add_credits_and_wake_if(server, credits, 0);
Long Li8c5f9c12018-10-31 22:13:10 +00002930 break;
2931 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05002932 cur_len = (size_t)result;
Long Li8c5f9c12018-10-31 22:13:10 +00002933 iov_iter_advance(from, cur_len);
2934
2935 nr_pages =
2936 (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
2937
2938 wdata = cifs_writedata_direct_alloc(pagevec,
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002939 cifs_uncached_writev_complete);
Long Li8c5f9c12018-10-31 22:13:10 +00002940 if (!wdata) {
2941 rc = -ENOMEM;
2942 add_credits_and_wake_if(server, credits, 0);
2943 break;
2944 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002945
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002946
Long Li8c5f9c12018-10-31 22:13:10 +00002947 wdata->page_offset = start;
2948 wdata->tailsz =
2949 nr_pages > 1 ?
2950 cur_len - (PAGE_SIZE - start) -
2951 (nr_pages - 2) * PAGE_SIZE :
2952 cur_len;
2953 } else {
2954 nr_pages = get_numpages(wsize, len, &cur_len);
2955 wdata = cifs_writedata_alloc(nr_pages,
2956 cifs_uncached_writev_complete);
2957 if (!wdata) {
2958 rc = -ENOMEM;
2959 add_credits_and_wake_if(server, credits, 0);
2960 break;
2961 }
Jeff Layton5d81de82014-02-14 07:20:35 -05002962
Long Li8c5f9c12018-10-31 22:13:10 +00002963 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2964 if (rc) {
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08002965 kvfree(wdata->pages);
Long Li8c5f9c12018-10-31 22:13:10 +00002966 kfree(wdata);
2967 add_credits_and_wake_if(server, credits, 0);
2968 break;
2969 }
2970
2971 num_pages = nr_pages;
2972 rc = wdata_fill_from_iovec(
2973 wdata, from, &cur_len, &num_pages);
2974 if (rc) {
2975 for (i = 0; i < nr_pages; i++)
2976 put_page(wdata->pages[i]);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08002977 kvfree(wdata->pages);
Long Li8c5f9c12018-10-31 22:13:10 +00002978 kfree(wdata);
2979 add_credits_and_wake_if(server, credits, 0);
2980 break;
2981 }
2982
2983 /*
2984 * Bring nr_pages down to the number of pages we
2985 * actually used, and free any pages that we didn't use.
2986 */
2987 for ( ; nr_pages > num_pages; nr_pages--)
2988 put_page(wdata->pages[nr_pages - 1]);
2989
2990 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2991 }
Jeff Layton5d81de82014-02-14 07:20:35 -05002992
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002993 wdata->sync_mode = WB_SYNC_ALL;
2994 wdata->nr_pages = nr_pages;
2995 wdata->offset = (__u64)offset;
2996 wdata->cfile = cifsFileInfo_get(open_file);
2997 wdata->pid = pid;
2998 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002999 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003000 wdata->credits = credits_on_stack;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003001 wdata->ctx = ctx;
3002 kref_get(&ctx->refcount);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04003003
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003004 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
3005
3006 if (!rc) {
3007 if (wdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003008 rc = -EAGAIN;
3009 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003010 rc = server->ops->async_writev(wdata,
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04003011 cifs_uncached_writedata_release);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003012 }
3013
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003014 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003015 add_credits_and_wake_if(server, &wdata->credits, 0);
Steve French4a5c80d2014-02-07 20:45:12 -06003016 kref_put(&wdata->refcount,
3017 cifs_uncached_writedata_release);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04003018 if (rc == -EAGAIN) {
Al Virofc56b982016-09-21 18:18:23 -04003019 *from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04003020 iov_iter_advance(from, offset - saved_offset);
3021 continue;
3022 }
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003023 break;
3024 }
3025
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04003026 list_add_tail(&wdata->list, wdata_list);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003027 offset += cur_len;
3028 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003029 } while (len > 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003030
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003031 free_xid(xid);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003032 return rc;
3033}
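
/*
 * Editor's illustration, not part of fs/cifs/file.c: the direct-I/O page
 * arithmetic in cifs_write_from_iter(), with a hypothetical 4 KiB page.
 * A buffer beginning at offset "start" inside its first page spans
 * nr_pages pages, and tailsz is whatever is left for the last one.
 */
#include <assert.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096u

int main(void)
{
	size_t cur_len = 10000, start = 100;	/* offset into first page */
	size_t nr_pages =
		(cur_len + start + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
	size_t tailsz = nr_pages > 1 ?
		cur_len - (DEMO_PAGE_SIZE - start) -
			(nr_pages - 2) * DEMO_PAGE_SIZE :
		cur_len;

	assert(nr_pages == 3);			/* bytes 100..10099 */
	assert(tailsz == 10000 - 3996 - 4096);	/* 1908 in last page */
	return 0;
}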
3034
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003035static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
3036{
3037 struct cifs_writedata *wdata, *tmp;
3038 struct cifs_tcon *tcon;
3039 struct cifs_sb_info *cifs_sb;
3040 struct dentry *dentry = ctx->cfile->dentry;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003041 int rc;
3042
3043 tcon = tlink_tcon(ctx->cfile->tlink);
3044 cifs_sb = CIFS_SB(dentry->d_sb);
3045
3046 mutex_lock(&ctx->aio_mutex);
3047
3048 if (list_empty(&ctx->list)) {
3049 mutex_unlock(&ctx->aio_mutex);
3050 return;
3051 }
3052
3053 rc = ctx->rc;
3054 /*
3055 * Wait for and collect replies for any successful sends in order of
3056 * increasing offset. Once an error is hit, return without waiting
3057 * for any more replies.
3058 */
3059restart_loop:
3060 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
3061 if (!rc) {
3062 if (!try_wait_for_completion(&wdata->done)) {
3063 mutex_unlock(&ctx->aio_mutex);
3064 return;
3065 }
3066
3067 if (wdata->result)
3068 rc = wdata->result;
3069 else
3070 ctx->total_len += wdata->bytes;
3071
3072 /* resend call if it's a retryable error */
3073 if (rc == -EAGAIN) {
3074 struct list_head tmp_list;
3075 struct iov_iter tmp_from = ctx->iter;
3076
3077 INIT_LIST_HEAD(&tmp_list);
3078 list_del_init(&wdata->list);
3079
Long Li8c5f9c12018-10-31 22:13:10 +00003080 if (ctx->direct_io)
3081 rc = cifs_resend_wdata(
3082 wdata, &tmp_list, ctx);
3083 else {
3084 iov_iter_advance(&tmp_from,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003085 wdata->offset - ctx->pos);
3086
Long Li8c5f9c12018-10-31 22:13:10 +00003087 rc = cifs_write_from_iter(wdata->offset,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003088 wdata->bytes, &tmp_from,
3089 ctx->cfile, cifs_sb, &tmp_list,
3090 ctx);
Long Lid53e2922019-03-15 07:54:59 +00003091
3092 kref_put(&wdata->refcount,
3093 cifs_uncached_writedata_release);
Long Li8c5f9c12018-10-31 22:13:10 +00003094 }
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003095
3096 list_splice(&tmp_list, &ctx->list);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003097 goto restart_loop;
3098 }
3099 }
3100 list_del_init(&wdata->list);
3101 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3102 }
3103
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003104 cifs_stats_bytes_written(tcon, ctx->total_len);
3105 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
3106
3107 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3108
3109 mutex_unlock(&ctx->aio_mutex);
3110
3111 if (ctx->iocb && ctx->iocb->ki_complete)
3112 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3113 else
3114 complete(&ctx->done);
3115}
3116
Long Li8c5f9c12018-10-31 22:13:10 +00003117static ssize_t __cifs_writev(
3118 struct kiocb *iocb, struct iov_iter *from, bool direct)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003119{
Al Viroe9d15932015-04-06 22:44:11 -04003120 struct file *file = iocb->ki_filp;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003121 ssize_t total_written = 0;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003122 struct cifsFileInfo *cfile;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003123 struct cifs_tcon *tcon;
3124 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003125 struct cifs_aio_ctx *ctx;
Al Virofc56b982016-09-21 18:18:23 -04003126 struct iov_iter saved_from = *from;
Long Li8c5f9c12018-10-31 22:13:10 +00003127 size_t len = iov_iter_count(from);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003128 int rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003129
Al Viroe9d15932015-04-06 22:44:11 -04003130 /*
Long Li8c5f9c12018-10-31 22:13:10 +00003131 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
3132 * In this case, fall back to the non-direct write function.
3133 * This could be improved by getting pages directly from an ITER_KVEC.
Al Viroe9d15932015-04-06 22:44:11 -04003134 */
Long Li8c5f9c12018-10-31 22:13:10 +00003135 if (direct && from->type & ITER_KVEC) {
3136 cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
3137 direct = false;
3138 }
Al Viroe9d15932015-04-06 22:44:11 -04003139
Al Viro3309dd02015-04-09 12:55:47 -04003140 rc = generic_write_checks(iocb, from);
3141 if (rc <= 0)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003142 return rc;
3143
Al Viro7119e222014-10-22 00:25:12 -04003144 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003145 cfile = file->private_data;
3146 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003147
3148 if (!tcon->ses->server->ops->async_writev)
3149 return -ENOSYS;
3150
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003151 ctx = cifs_aio_ctx_alloc();
3152 if (!ctx)
3153 return -ENOMEM;
3154
3155 ctx->cfile = cifsFileInfo_get(cfile);
3156
3157 if (!is_sync_kiocb(iocb))
3158 ctx->iocb = iocb;
3159
3160 ctx->pos = iocb->ki_pos;
3161
Long Li8c5f9c12018-10-31 22:13:10 +00003162 if (direct) {
3163 ctx->direct_io = true;
3164 ctx->iter = *from;
3165 ctx->len = len;
3166 } else {
3167 rc = setup_aio_ctx_iter(ctx, from, WRITE);
3168 if (rc) {
3169 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3170 return rc;
3171 }
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003172 }
3173
3174 /* grab a lock here because write response handlers can access ctx */
3175 mutex_lock(&ctx->aio_mutex);
3176
3177 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
3178 cfile, cifs_sb, &ctx->list, ctx);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003179
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003180 /*
3181 * If at least one write was successfully sent, then discard any rc
3182 * value from the later writes. If the other writes succeed, then
3183 * we'll end up returning whatever was written. If they fail, then
3184 * we'll get a new rc value from that.
3185 */
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003186 if (!list_empty(&ctx->list))
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003187 rc = 0;
3188
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003189 mutex_unlock(&ctx->aio_mutex);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003190
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003191 if (rc) {
3192 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3193 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003194 }
3195
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003196 if (!is_sync_kiocb(iocb)) {
3197 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3198 return -EIOCBQUEUED;
3199 }
3200
3201 rc = wait_for_completion_killable(&ctx->done);
3202 if (rc) {
3203 mutex_lock(&ctx->aio_mutex);
3204 ctx->rc = rc = -EINTR;
3205 total_written = ctx->total_len;
3206 mutex_unlock(&ctx->aio_mutex);
3207 } else {
3208 rc = ctx->rc;
3209 total_written = ctx->total_len;
3210 }
3211
3212 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3213
Al Viroe9d15932015-04-06 22:44:11 -04003214 if (unlikely(!total_written))
3215 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003216
Al Viroe9d15932015-04-06 22:44:11 -04003217 iocb->ki_pos += total_written;
Al Viroe9d15932015-04-06 22:44:11 -04003218 return total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003219}
3220
Long Li8c5f9c12018-10-31 22:13:10 +00003221ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3222{
3223 return __cifs_writev(iocb, from, true);
3224}
3225
3226ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3227{
3228 return __cifs_writev(iocb, from, false);
3229}
3230
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003231static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04003232cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003233{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003234 struct file *file = iocb->ki_filp;
3235 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
3236 struct inode *inode = file->f_mapping->host;
3237 struct cifsInodeInfo *cinode = CIFS_I(inode);
3238 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04003239 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003240
Rabin Vincent966681c2017-06-29 16:01:42 +02003241 inode_lock(inode);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003242 /*
3243 * We need to hold the sem to be sure nobody modifies the lock list
3244 * with a brlock that prevents writing.
3245 */
3246 down_read(&cinode->lock_sem);
Al Viro5f380c72015-04-07 11:28:12 -04003247
Al Viro3309dd02015-04-09 12:55:47 -04003248 rc = generic_write_checks(iocb, from);
3249 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04003250 goto out;
3251
Al Viro5f380c72015-04-07 11:28:12 -04003252 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003253 server->vals->exclusive_lock_type, 0,
3254 NULL, CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04003255 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04003256 else
3257 rc = -EACCES;
3258out:
Rabin Vincent966681c2017-06-29 16:01:42 +02003259 up_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05003260 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04003261
Christoph Hellwige2592212016-04-07 08:52:01 -07003262 if (rc > 0)
3263 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003264 return rc;
3265}
3266
3267ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04003268cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003269{
Al Viro496ad9a2013-01-23 17:07:38 -05003270 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003271 struct cifsInodeInfo *cinode = CIFS_I(inode);
3272 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3273 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3274 iocb->ki_filp->private_data;
3275 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003276 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003277
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003278 written = cifs_get_writer(cinode);
3279 if (written)
3280 return written;
3281
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003282 if (CIFS_CACHE_WRITE(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003283 if (cap_unix(tcon->ses) &&
3284 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003285 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
Al Viro3dae8752014-04-03 12:05:17 -04003286 written = generic_file_write_iter(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003287 goto out;
3288 }
Al Viro3dae8752014-04-03 12:05:17 -04003289 written = cifs_writev(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003290 goto out;
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04003291 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003292 /*
3293 * For non-oplocked files in strict cache mode we need to write the data
3294 * to the server exactly from pos to pos+len-1 rather than flush all
3295 * affected pages, because that may cause an error with mandatory locks on
3296 * these pages but not on the region from pos to pos+len-1.
3297 */
Al Viro3dae8752014-04-03 12:05:17 -04003298 written = cifs_user_writev(iocb, from);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003299 if (CIFS_CACHE_READ(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003300 /*
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003301 * We have read level caching and we have just sent a write
3302 * request to the server thus making data in the cache stale.
3303 * Zap the cache and set oplock/lease level to NONE to avoid
3304 * reading stale data from the cache. All subsequent read
3305 * operations will read new data from the server.
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003306 */
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003307 cifs_zap_mapping(inode);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003308 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05003309 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003310 cinode->oplock = 0;
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003311 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003312out:
3313 cifs_put_writer(cinode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003314 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003315}
3316
Jeff Layton0471ca32012-05-16 07:13:16 -04003317static struct cifs_readdata *
Long Lif9f5aca2018-05-30 12:47:54 -07003318cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04003319{
3320 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003321
Long Lif9f5aca2018-05-30 12:47:54 -07003322 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04003323 if (rdata != NULL) {
Long Lif9f5aca2018-05-30 12:47:54 -07003324 rdata->pages = pages;
Jeff Layton6993f742012-05-16 07:13:17 -04003325 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003326 INIT_LIST_HEAD(&rdata->list);
3327 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04003328 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04003329 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003330
Jeff Layton0471ca32012-05-16 07:13:16 -04003331 return rdata;
3332}
3333
Long Lif9f5aca2018-05-30 12:47:54 -07003334static struct cifs_readdata *
3335cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3336{
3337 struct page **pages =
Kees Cook6396bb22018-06-12 14:03:40 -07003338 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
Long Lif9f5aca2018-05-30 12:47:54 -07003339 struct cifs_readdata *ret = NULL;
3340
3341 if (pages) {
3342 ret = cifs_readdata_direct_alloc(pages, complete);
3343 if (!ret)
3344 kfree(pages);
3345 }
3346
3347 return ret;
3348}
3349
Jeff Layton6993f742012-05-16 07:13:17 -04003350void
3351cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04003352{
Jeff Layton6993f742012-05-16 07:13:17 -04003353 struct cifs_readdata *rdata = container_of(refcount,
3354 struct cifs_readdata, refcount);
Long Libd3dcc62017-11-22 17:38:47 -07003355#ifdef CONFIG_CIFS_SMB_DIRECT
3356 if (rdata->mr) {
3357 smbd_deregister_mr(rdata->mr);
3358 rdata->mr = NULL;
3359 }
3360#endif
Jeff Layton6993f742012-05-16 07:13:17 -04003361 if (rdata->cfile)
3362 cifsFileInfo_put(rdata->cfile);
3363
Long Lif9f5aca2018-05-30 12:47:54 -07003364 kvfree(rdata->pages);
Jeff Layton0471ca32012-05-16 07:13:16 -04003365 kfree(rdata);
3366}
3367
Jeff Layton2a1bb132012-05-16 07:13:17 -04003368static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003369cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04003370{
3371 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003372 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04003373 unsigned int i;
3374
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003375 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04003376 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3377 if (!page) {
3378 rc = -ENOMEM;
3379 break;
3380 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003381 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04003382 }
3383
3384 if (rc) {
Roberto Bergantinos Corpas31fad7d2019-05-28 09:38:14 +02003385 unsigned int nr_page_failed = i;
3386
3387 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003388 put_page(rdata->pages[i]);
3389 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04003390 }
3391 }
3392 return rc;
3393}
3394
3395static void
3396cifs_uncached_readdata_release(struct kref *refcount)
3397{
Jeff Layton1c892542012-05-16 07:13:17 -04003398 struct cifs_readdata *rdata = container_of(refcount,
3399 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003400 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003401
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003402 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003403 for (i = 0; i < rdata->nr_pages; i++) {
3404 put_page(rdata->pages[i]);
Jeff Layton1c892542012-05-16 07:13:17 -04003405 }
3406 cifs_readdata_release(refcount);
3407}
3408
Jeff Layton1c892542012-05-16 07:13:17 -04003409/**
3410 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3411 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05003412 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04003413 *
3414 * This function copies data from a list of pages in a readdata response into
3415 * an array of iovecs. It will first calculate where the data should go
3416 * based on the info in the readdata and then copy the data into that spot.
3417 */
Al Viro7f25bba2014-02-04 14:07:43 -05003418static int
3419cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04003420{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04003421 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003422 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003423
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003424 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003425 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02003426 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003427 size_t written;
3428
David Howells00e23702018-10-22 13:07:28 +01003429 if (unlikely(iov_iter_is_pipe(iter))) {
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003430 void *addr = kmap_atomic(page);
3431
3432 written = copy_to_iter(addr, copy, iter);
3433 kunmap_atomic(addr);
3434 } else
3435 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05003436 remaining -= written;
3437 if (written < copy && iov_iter_count(iter) > 0)
3438 break;
Jeff Layton1c892542012-05-16 07:13:17 -04003439 }
Al Viro7f25bba2014-02-04 14:07:43 -05003440 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04003441}
3442
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003443static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3444
Jeff Layton1c892542012-05-16 07:13:17 -04003445static void
3446cifs_uncached_readv_complete(struct work_struct *work)
3447{
3448 struct cifs_readdata *rdata = container_of(work,
3449 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04003450
3451 complete(&rdata->done);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003452 collect_uncached_read_data(rdata->ctx);
3453 /* the call below can possibly free the last ref to the aio ctx */
Jeff Layton1c892542012-05-16 07:13:17 -04003454 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3455}
3456
3457static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003458uncached_fill_pages(struct TCP_Server_Info *server,
3459 struct cifs_readdata *rdata, struct iov_iter *iter,
3460 unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04003461{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003462 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003463 unsigned int i;
3464 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003465 unsigned int page_offset = rdata->page_offset;
Jeff Layton1c892542012-05-16 07:13:17 -04003466
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003467 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07003468 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003469 for (i = 0; i < nr_pages; i++) {
3470 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05003471 size_t n;
Long Li1dbe3462018-05-30 12:47:55 -07003472 unsigned int segment_size = rdata->pagesz;
3473
3474 if (i == 0)
3475 segment_size -= page_offset;
3476 else
3477 page_offset = 0;
3478
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003479
Al Viro71335662016-01-09 19:54:50 -05003480 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04003481 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003482 rdata->pages[i] = NULL;
3483 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04003484 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07003485 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003486 }
Long Li1dbe3462018-05-30 12:47:55 -07003487
Al Viro71335662016-01-09 19:54:50 -05003488 n = len;
Long Li1dbe3462018-05-30 12:47:55 -07003489 if (len >= segment_size)
Al Viro71335662016-01-09 19:54:50 -05003490 /* enough data to fill the page */
Long Li1dbe3462018-05-30 12:47:55 -07003491 n = segment_size;
3492 else
Al Viro71335662016-01-09 19:54:50 -05003493 rdata->tailsz = len;
Long Li1dbe3462018-05-30 12:47:55 -07003494 len -= n;
3495
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003496 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003497 result = copy_page_from_iter(
3498 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003499#ifdef CONFIG_CIFS_SMB_DIRECT
3500 else if (rdata->mr)
3501 result = n;
3502#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003503 else
Long Li1dbe3462018-05-30 12:47:55 -07003504 result = cifs_read_page_from_socket(
3505 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003506 if (result < 0)
3507 break;
3508
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003509 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003510 }
3511
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003512 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3513 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003514}
3515
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003516static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003517cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3518 struct cifs_readdata *rdata, unsigned int len)
3519{
3520 return uncached_fill_pages(server, rdata, NULL, len);
3521}
3522
3523static int
3524cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3525 struct cifs_readdata *rdata,
3526 struct iov_iter *iter)
3527{
3528 return uncached_fill_pages(server, rdata, iter, iter->count);
3529}
3530
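/*
 * Resend a whole rdata after a reconnect (direct I/O path): reopen the
 * file handle if needed, wait until the granted credits cover
 * rdata->bytes, then reissue the async read and queue the rdata on
 * @rdata_list.
 */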
Long Li6e6e2b82018-10-31 22:13:09 +00003531static int cifs_resend_rdata(struct cifs_readdata *rdata,
3532 struct list_head *rdata_list,
3533 struct cifs_aio_ctx *ctx)
3534{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003535 unsigned int rsize;
3536 struct cifs_credits credits;
Long Li6e6e2b82018-10-31 22:13:09 +00003537 int rc;
3538 struct TCP_Server_Info *server =
3539 tlink_tcon(rdata->cfile->tlink)->ses->server;
3540
Long Li6e6e2b82018-10-31 22:13:09 +00003541 do {
Long Li0b0dfd52019-03-15 07:55:00 +00003542 if (rdata->cfile->invalidHandle) {
3543 rc = cifs_reopen_file(rdata->cfile, true);
3544 if (rc == -EAGAIN)
3545 continue;
3546 else if (rc)
3547 break;
3548 }
3549
3550 /*
3551 * Wait for credits to resend this rdata.
3552		 * Note: we are attempting to resend the whole rdata, not in
3553		 * segments.
3554 */
3555 do {
3556 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
Long Li6e6e2b82018-10-31 22:13:09 +00003557 &rsize, &credits);
3558
Long Li0b0dfd52019-03-15 07:55:00 +00003559 if (rc)
3560 goto fail;
Long Li6e6e2b82018-10-31 22:13:09 +00003561
Long Li0b0dfd52019-03-15 07:55:00 +00003562 if (rsize < rdata->bytes) {
3563 add_credits_and_wake_if(server, &credits, 0);
3564 msleep(1000);
3565 }
3566 } while (rsize < rdata->bytes);
3567 rdata->credits = credits;
3568
3569 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3570 if (!rc) {
3571 if (rdata->cfile->invalidHandle)
3572 rc = -EAGAIN;
Long Lib7a55bb2019-10-15 22:54:50 +00003573 else {
3574#ifdef CONFIG_CIFS_SMB_DIRECT
3575 if (rdata->mr) {
3576 rdata->mr->need_invalidate = true;
3577 smbd_deregister_mr(rdata->mr);
3578 rdata->mr = NULL;
3579 }
3580#endif
Long Li0b0dfd52019-03-15 07:55:00 +00003581 rc = server->ops->async_readv(rdata);
Long Lib7a55bb2019-10-15 22:54:50 +00003582 }
Long Li6e6e2b82018-10-31 22:13:09 +00003583 }
Long Li6e6e2b82018-10-31 22:13:09 +00003584
Long Li0b0dfd52019-03-15 07:55:00 +00003585 /* If the read was successfully sent, we are done */
3586 if (!rc) {
3587 /* Add to aio pending list */
3588 list_add_tail(&rdata->list, rdata_list);
3589 return 0;
3590 }
Long Li6e6e2b82018-10-31 22:13:09 +00003591
Long Li0b0dfd52019-03-15 07:55:00 +00003592 /* Roll back credits and retry if needed */
3593 add_credits_and_wake_if(server, &rdata->credits, 0);
3594 } while (rc == -EAGAIN);
Long Li6e6e2b82018-10-31 22:13:09 +00003595
Long Li0b0dfd52019-03-15 07:55:00 +00003596fail:
3597 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Long Li6e6e2b82018-10-31 22:13:09 +00003598 return rc;
3599}
3600
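/*
 * Split the range [offset, offset + len) into rsize-sized chunks and
 * issue an async read per chunk. For direct I/O the user's pages are
 * pinned via iov_iter_get_pages_alloc(); otherwise a bounce page array
 * is allocated. Each issued rdata is appended to @rdata_list.
 */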
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003601static int
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003602cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003603 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3604 struct cifs_aio_ctx *ctx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003606 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003607 unsigned int npages, rsize;
3608 struct cifs_credits credits_on_stack;
3609 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003610 size_t cur_len;
3611 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003612 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003613 struct TCP_Server_Info *server;
Long Li6e6e2b82018-10-31 22:13:09 +00003614 struct page **pagevec;
3615 size_t start;
3616 struct iov_iter direct_iov = ctx->iter;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003617
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003618 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07003619
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003620 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3621 pid = open_file->pid;
3622 else
3623 pid = current->tgid;
3624
Long Li6e6e2b82018-10-31 22:13:09 +00003625 if (ctx->direct_io)
3626 iov_iter_advance(&direct_iov, offset - ctx->pos);
3627
Jeff Layton1c892542012-05-16 07:13:17 -04003628 do {
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003629 if (open_file->invalidHandle) {
3630 rc = cifs_reopen_file(open_file, true);
3631 if (rc == -EAGAIN)
3632 continue;
3633 else if (rc)
3634 break;
3635 }
3636
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003637 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003638 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003639 if (rc)
3640 break;
3641
3642 cur_len = min_t(const size_t, len, rsize);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003643
Long Li6e6e2b82018-10-31 22:13:09 +00003644 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05003645 ssize_t result;
Long Li6e6e2b82018-10-31 22:13:09 +00003646
Steve Frenchb98e26d2018-11-01 10:54:32 -05003647 result = iov_iter_get_pages_alloc(
Long Li6e6e2b82018-10-31 22:13:09 +00003648 &direct_iov, &pagevec,
3649 cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05003650 if (result < 0) {
Long Li6e6e2b82018-10-31 22:13:09 +00003651				cifs_dbg(VFS,
Long Li54e94ff2018-12-16 22:41:07 +00003652					"couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
Steve Frenchb98e26d2018-11-01 10:54:32 -05003655					result, direct_iov.type,
Long Li6e6e2b82018-10-31 22:13:09 +00003656					direct_iov.iov_offset, direct_iov.count);
3658 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00003659
3660 rc = result;
3661 add_credits_and_wake_if(server, credits, 0);
Long Li6e6e2b82018-10-31 22:13:09 +00003662 break;
3663 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05003664 cur_len = (size_t)result;
Long Li6e6e2b82018-10-31 22:13:09 +00003665 iov_iter_advance(&direct_iov, cur_len);
3666
3667 rdata = cifs_readdata_direct_alloc(
3668 pagevec, cifs_uncached_readv_complete);
3669 if (!rdata) {
3670 add_credits_and_wake_if(server, credits, 0);
3671 rc = -ENOMEM;
3672 break;
3673 }
3674
3675 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
3676 rdata->page_offset = start;
3677 rdata->tailsz = npages > 1 ?
3678 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
3679 cur_len;
3680
3681 } else {
3682
3683 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3684 /* allocate a readdata struct */
3685 rdata = cifs_readdata_alloc(npages,
Jeff Layton1c892542012-05-16 07:13:17 -04003686 cifs_uncached_readv_complete);
Long Li6e6e2b82018-10-31 22:13:09 +00003687 if (!rdata) {
3688 add_credits_and_wake_if(server, credits, 0);
3689 rc = -ENOMEM;
3690 break;
3691 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003692
Long Li6e6e2b82018-10-31 22:13:09 +00003693 rc = cifs_read_allocate_pages(rdata, npages);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08003694 if (rc) {
3695 kvfree(rdata->pages);
3696 kfree(rdata);
3697 add_credits_and_wake_if(server, credits, 0);
3698 break;
3699 }
Long Li6e6e2b82018-10-31 22:13:09 +00003700
3701 rdata->tailsz = PAGE_SIZE;
3702 }
Jeff Layton1c892542012-05-16 07:13:17 -04003703
3704 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003705 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003706 rdata->offset = offset;
3707 rdata->bytes = cur_len;
3708 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003709 rdata->pagesz = PAGE_SIZE;
3710 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003711 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003712 rdata->credits = credits_on_stack;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003713 rdata->ctx = ctx;
3714 kref_get(&ctx->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003715
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003716 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3717
3718 if (!rc) {
3719 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003720 rc = -EAGAIN;
3721 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003722 rc = server->ops->async_readv(rdata);
3723 }
3724
Jeff Layton1c892542012-05-16 07:13:17 -04003725 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003726 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003727 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003728 cifs_uncached_readdata_release);
3729 if (rc == -EAGAIN) {
3730 iov_iter_revert(&direct_iov, cur_len);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003731 continue;
Long Li6e6e2b82018-10-31 22:13:09 +00003732 }
Jeff Layton1c892542012-05-16 07:13:17 -04003733 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734 }
Jeff Layton1c892542012-05-16 07:13:17 -04003735
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003736 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003737 offset += cur_len;
3738 len -= cur_len;
3739 } while (len > 0);
3740
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003741 return rc;
3742}
3743
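/*
 * Gather the results of all rdatas on ctx->list (in increasing offset
 * order): copy completed data to the user iterator (non-direct case),
 * resend any chunk that failed with -EAGAIN, and finally complete the
 * iocb or signal ctx->done for synchronous callers.
 */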
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003744static void
3745collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003746{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003747 struct cifs_readdata *rdata, *tmp;
3748 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003749 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003750 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003751
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003752 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003753
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003754 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003755
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003756 if (list_empty(&ctx->list)) {
3757 mutex_unlock(&ctx->aio_mutex);
3758 return;
3759 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003760
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003761 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003762 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003763again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003764 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003765 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003766 if (!try_wait_for_completion(&rdata->done)) {
3767 mutex_unlock(&ctx->aio_mutex);
3768 return;
3769 }
3770
3771 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003772 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003773 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003774 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003775
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003776 list_del_init(&rdata->list);
3777 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003778
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003779 /*
3780				 * Got part of the data and then a reconnect
3781				 * happened -- fill the buffer and continue
3782 * reading.
3783 */
3784 if (got_bytes && got_bytes < rdata->bytes) {
Long Li6e6e2b82018-10-31 22:13:09 +00003785 rc = 0;
3786 if (!ctx->direct_io)
3787 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003788 if (rc) {
3789 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003790 cifs_uncached_readdata_release);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003791 continue;
3792 }
3793 }
3794
Long Li6e6e2b82018-10-31 22:13:09 +00003795 if (ctx->direct_io) {
3796 /*
3797 * Re-use rdata as this is a
3798 * direct I/O
3799 */
3800 rc = cifs_resend_rdata(
3801 rdata,
3802 &tmp_list, ctx);
3803 } else {
3804 rc = cifs_send_async_read(
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003805 rdata->offset + got_bytes,
3806 rdata->bytes - got_bytes,
3807 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003808 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003809
Long Li6e6e2b82018-10-31 22:13:09 +00003810 kref_put(&rdata->refcount,
3811 cifs_uncached_readdata_release);
3812 }
3813
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003814 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003815
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003816 goto again;
3817 } else if (rdata->result)
3818 rc = rdata->result;
Long Li6e6e2b82018-10-31 22:13:09 +00003819 else if (!ctx->direct_io)
Jeff Layton1c892542012-05-16 07:13:17 -04003820 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003821
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003822 /* if there was a short read -- discard anything left */
3823 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3824 rc = -ENODATA;
Long Li6e6e2b82018-10-31 22:13:09 +00003825
3826 ctx->total_len += rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003827 }
3828 list_del_init(&rdata->list);
3829 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003831
Jérôme Glisse13f59382019-04-10 15:37:47 -04003832 if (!ctx->direct_io)
Long Li6e6e2b82018-10-31 22:13:09 +00003833 ctx->total_len = ctx->len - iov_iter_count(to);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003834
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003835 /* mask nodata case */
3836 if (rc == -ENODATA)
3837 rc = 0;
3838
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003839 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3840
3841 mutex_unlock(&ctx->aio_mutex);
3842
3843 if (ctx->iocb && ctx->iocb->ki_complete)
3844 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3845 else
3846 complete(&ctx->done);
3847}
3848
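/*
 * Common implementation behind cifs_user_readv() and
 * cifs_direct_readv(): set up a cifs_aio_ctx, fan the request out via
 * cifs_send_async_read(), then either wait for completion (sync iocb)
 * or return -EIOCBQUEUED and let the completion path finish the iocb.
 */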
Long Li6e6e2b82018-10-31 22:13:09 +00003849static ssize_t __cifs_readv(
3850 struct kiocb *iocb, struct iov_iter *to, bool direct)
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003851{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003852 size_t len;
Long Li6e6e2b82018-10-31 22:13:09 +00003853 struct file *file = iocb->ki_filp;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003854 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003855 struct cifsFileInfo *cfile;
Long Li6e6e2b82018-10-31 22:13:09 +00003856 struct cifs_tcon *tcon;
3857 ssize_t rc, total_read = 0;
3858 loff_t offset = iocb->ki_pos;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003859 struct cifs_aio_ctx *ctx;
3860
Long Li6e6e2b82018-10-31 22:13:09 +00003861 /*
3862 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
3863	 * so fall back to the data copy read path.
3864	 * This could be improved by getting pages directly in ITER_KVEC.
3865 */
3866 if (direct && to->type & ITER_KVEC) {
3867 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3868 direct = false;
3869 }
3870
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003871 len = iov_iter_count(to);
3872 if (!len)
3873 return 0;
3874
3875 cifs_sb = CIFS_FILE_SB(file);
3876 cfile = file->private_data;
3877 tcon = tlink_tcon(cfile->tlink);
3878
3879 if (!tcon->ses->server->ops->async_readv)
3880 return -ENOSYS;
3881
3882 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3883 cifs_dbg(FYI, "attempting read on write only file instance\n");
3884
3885 ctx = cifs_aio_ctx_alloc();
3886 if (!ctx)
3887 return -ENOMEM;
3888
3889 ctx->cfile = cifsFileInfo_get(cfile);
3890
3891 if (!is_sync_kiocb(iocb))
3892 ctx->iocb = iocb;
3893
David Howells00e23702018-10-22 13:07:28 +01003894 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003895 ctx->should_dirty = true;
3896
Long Li6e6e2b82018-10-31 22:13:09 +00003897 if (direct) {
3898 ctx->pos = offset;
3899 ctx->direct_io = true;
3900 ctx->iter = *to;
3901 ctx->len = len;
3902 } else {
3903 rc = setup_aio_ctx_iter(ctx, to, READ);
3904 if (rc) {
3905 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3906 return rc;
3907 }
3908 len = ctx->len;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003909 }
3910
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003911	/* grab a lock here because read response handlers can access ctx */
3912 mutex_lock(&ctx->aio_mutex);
3913
3914 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3915
3916 /* if at least one read request send succeeded, then reset rc */
3917 if (!list_empty(&ctx->list))
3918 rc = 0;
3919
3920 mutex_unlock(&ctx->aio_mutex);
3921
3922 if (rc) {
3923 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3924 return rc;
3925 }
3926
3927 if (!is_sync_kiocb(iocb)) {
3928 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3929 return -EIOCBQUEUED;
3930 }
3931
3932 rc = wait_for_completion_killable(&ctx->done);
3933 if (rc) {
3934 mutex_lock(&ctx->aio_mutex);
3935 ctx->rc = rc = -EINTR;
3936 total_read = ctx->total_len;
3937 mutex_unlock(&ctx->aio_mutex);
3938 } else {
3939 rc = ctx->rc;
3940 total_read = ctx->total_len;
3941 }
3942
3943 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3944
Al Viro0165e812014-02-04 14:19:48 -05003945 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003946 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003947 return total_read;
3948 }
3949 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003950}
3951
Long Li6e6e2b82018-10-31 22:13:09 +00003952ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3953{
3954 return __cifs_readv(iocb, to, true);
3955}
3956
3957ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3958{
3959 return __cifs_readv(iocb, to, false);
3960}
3961
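/*
 * Entry point for "strict" cache mode reads: read through the page
 * cache only when a read (level II) oplock makes cached data
 * trustworthy and no conflicting mandatory brlock covers the range;
 * otherwise go to the server via cifs_user_readv().
 */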
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003962ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003963cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003964{
Al Viro496ad9a2013-01-23 17:07:38 -05003965 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003966 struct cifsInodeInfo *cinode = CIFS_I(inode);
3967 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3968 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3969 iocb->ki_filp->private_data;
3970 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3971 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003972
3973 /*
3974 * In strict cache mode we need to read from the server all the time
3975 * if we don't have level II oplock because the server can delay mtime
3976	 * change - so we can't make a decision about invalidating the inode.
3977	 * We can also fail when reading pages if there are mandatory locks
3978 * on pages affected by this read but not on the region from pos to
3979 * pos+len-1.
3980 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003981 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003982 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003983
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003984 if (cap_unix(tcon->ses) &&
3985 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3986 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003987 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003988
3989 /*
3990 * We need to hold the sem to be sure nobody modifies lock list
3991 * with a brlock that prevents reading.
3992 */
3993 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003994 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003995 tcon->ses->server->vals->shared_lock_type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003996 0, NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003997 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003998 up_read(&cinode->lock_sem);
3999 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004000}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001
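/*
 * Synchronous read helper (used by the readpage path): loop issuing
 * sync_read calls of at most rsize bytes, reopening the handle after
 * -EAGAIN, until read_size bytes have been returned or an error occurs.
 */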
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004002static ssize_t
4003cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004{
4005 int rc = -EACCES;
4006 unsigned int bytes_read = 0;
4007 unsigned int total_read;
4008 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004009 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004011 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004012 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004013 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004014 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004016 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08004017 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004018 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004020 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04004021 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004022
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004023 /* FIXME: set up handlers for larger reads and/or convert to async */
4024 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
4025
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304027 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004028 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304029 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004030 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07004031 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004032 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004033 server = tcon->ses->server;
4034
4035 if (!server->ops->sync_read) {
4036 free_xid(xid);
4037 return -ENOSYS;
4038 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004040 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4041 pid = open_file->pid;
4042 else
4043 pid = current->tgid;
4044
Linus Torvalds1da177e2005-04-16 15:20:36 -07004045 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05004046 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004048 for (total_read = 0, cur_offset = read_data; read_size > total_read;
4049 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04004050 do {
4051 current_read_size = min_t(uint, read_size - total_read,
4052 rsize);
4053 /*
4054			 * For Windows ME and 9x we do not want to request more
4055			 * than was negotiated, since the server will refuse
4056			 * the read then.
4057 */
4058 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004059 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04004060 current_read_size = min_t(uint,
4061 current_read_size, CIFSMaxBufSize);
4062 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00004063 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04004064 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 if (rc != 0)
4066 break;
4067 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004068 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004069 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004070 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004071 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05004072 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004073 &bytes_read, &cur_offset,
4074 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04004075 } while (rc == -EAGAIN);
4076
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077 if (rc || (bytes_read == 0)) {
4078 if (total_read) {
4079 break;
4080 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004081 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004082 return rc;
4083 }
4084 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004085 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004086 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087 }
4088 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004089 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 return total_read;
4091}
4092
Jeff Laytonca83ce32011-04-12 09:13:44 -04004093/*
4094 * If the page is mmap'ed into a process' page tables, then we need to make
4095 * sure that it doesn't change while being written back.
4096 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05304097static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08004098cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004099{
4100 struct page *page = vmf->page;
4101
4102 lock_page(page);
4103 return VM_FAULT_LOCKED;
4104}
4105
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004106static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04004107 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07004108 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04004109 .page_mkwrite = cifs_page_mkwrite,
4110};
4111
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004112int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4113{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004114 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05004115 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004116
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004117 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004118
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004119 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004120 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004121 if (!rc)
4122 rc = generic_file_mmap(file, vma);
4123 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004124 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004125
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004126 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004127 return rc;
4128}
4129
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4131{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132 int rc, xid;
4133
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004134 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004135
Jeff Laytonabab0952010-02-12 07:44:18 -05004136 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004137 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004138 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4139 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004140 if (!rc)
4141 rc = generic_file_mmap(file, vma);
4142 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004143 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004144
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004145 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146 return rc;
4147}
4148
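/*
 * Completion work for cached (readahead) reads: mark pages uptodate
 * when data arrived, hand them to fscache, then unlock and release
 * every page along with our rdata reference.
 */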
Jeff Layton0471ca32012-05-16 07:13:16 -04004149static void
4150cifs_readv_complete(struct work_struct *work)
4151{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004152 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04004153 struct cifs_readdata *rdata = container_of(work,
4154 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04004155
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004156 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004157 for (i = 0; i < rdata->nr_pages; i++) {
4158 struct page *page = rdata->pages[i];
4159
Jeff Layton0471ca32012-05-16 07:13:16 -04004160 lru_cache_add_file(page);
4161
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004162 if (rdata->result == 0 ||
4163 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04004164 flush_dcache_page(page);
4165 SetPageUptodate(page);
4166 }
4167
4168 unlock_page(page);
4169
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004170 if (rdata->result == 0 ||
4171 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04004172 cifs_readpage_to_fscache(rdata->mapping->host, page);
4173
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004174 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004175
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004176 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004177 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04004178 }
Jeff Layton6993f742012-05-16 07:13:17 -04004179 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04004180}
4181
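/*
 * Page-cache flavor of uncached_fill_pages(): fill the locked pages
 * from @iter or the socket, zero the tail of a partially-filled page,
 * and synthesize zeroed, uptodate pages for indexes beyond the
 * server's EOF so the VFS stops re-requesting them.
 */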
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004182static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004183readpages_fill_pages(struct TCP_Server_Info *server,
4184 struct cifs_readdata *rdata, struct iov_iter *iter,
4185 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004186{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004187 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004188 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004189 u64 eof;
4190 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004191 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07004192 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004193
4194 /* determine the eof that the server (probably) has */
4195 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004196 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05004197 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004198
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004199 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004200 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004201 for (i = 0; i < nr_pages; i++) {
4202 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07004203 unsigned int to_read = rdata->pagesz;
4204 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004205
Long Li1dbe3462018-05-30 12:47:55 -07004206 if (i == 0)
4207 to_read -= page_offset;
4208 else
4209 page_offset = 0;
4210
4211 n = to_read;
4212
4213 if (len >= to_read) {
4214 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07004215 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004216 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07004217 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05004218 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07004219 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004220 } else if (page->index > eof_index) {
4221 /*
4222 * The VFS will not try to do readahead past the
4223 * i_size, but it's possible that we have outstanding
4224 * writes with gaps in the middle and the i_size hasn't
4225 * caught up yet. Populate those with zeroed out pages
4226 * to prevent the VFS from repeatedly attempting to
4227 * fill them until the writes are flushed.
4228 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004229 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004230 lru_cache_add_file(page);
4231 flush_dcache_page(page);
4232 SetPageUptodate(page);
4233 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004234 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004235 rdata->pages[i] = NULL;
4236 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004237 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004238 } else {
4239 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004240 lru_cache_add_file(page);
4241 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004242 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004243 rdata->pages[i] = NULL;
4244 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004245 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004246 }
Jeff Layton8321fec2012-09-19 06:22:32 -07004247
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004248 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07004249 result = copy_page_from_iter(
4250 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07004251#ifdef CONFIG_CIFS_SMB_DIRECT
4252 else if (rdata->mr)
4253 result = n;
4254#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004255 else
Long Li1dbe3462018-05-30 12:47:55 -07004256 result = cifs_read_page_from_socket(
4257 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07004258 if (result < 0)
4259 break;
4260
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004261 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004262 }
4263
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004264 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4265 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004266}
4267
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004268static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004269cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4270 struct cifs_readdata *rdata, unsigned int len)
4271{
4272 return readpages_fill_pages(server, rdata, NULL, len);
4273}
4274
4275static int
4276cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4277 struct cifs_readdata *rdata,
4278 struct iov_iter *iter)
4279{
4280 return readpages_fill_pages(server, rdata, iter, iter->count);
4281}
4282
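/*
 * Peel a run of pages with contiguous indexes (at most @rsize bytes)
 * off @page_list into @tmplist, adding each to the page cache locked;
 * the first page fixes the starting @offset of the resulting read.
 */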
4283static int
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004284readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
4285 unsigned int rsize, struct list_head *tmplist,
4286 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
4287{
4288 struct page *page, *tpage;
4289 unsigned int expected_index;
4290 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07004291 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004292
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004293 INIT_LIST_HEAD(tmplist);
4294
Nikolay Borisovf86196e2019-01-03 15:29:02 -08004295 page = lru_to_page(page_list);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004296
4297 /*
4298 * Lock the page and put it in the cache. Since no one else
4299 * should have access to this page, we're safe to simply set
4300 * PG_locked without checking it first.
4301 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004302 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004303 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07004304 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004305
4306 /* give up if we can't stick it in the cache */
4307 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004308 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004309 return rc;
4310 }
4311
4312 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004313 *offset = (loff_t)page->index << PAGE_SHIFT;
4314 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004315 *nr_pages = 1;
4316 list_move_tail(&page->lru, tmplist);
4317
4318 /* now try and add more pages onto the request */
4319 expected_index = page->index + 1;
4320 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4321 /* discontinuity ? */
4322 if (page->index != expected_index)
4323 break;
4324
4325 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004326 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004327 break;
4328
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004329 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07004330 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004331 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004332 break;
4333 }
4334 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004335 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004336 expected_index++;
4337 (*nr_pages)++;
4338 }
4339 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340}
4341
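/*
 * The address_space ->readpages implementation: after letting fscache
 * satisfy what it can, repeatedly grab credits, batch a contiguous run
 * of pages into one rdata and issue an async read for it; on failure
 * the pages are released so the VFS can fall back to ->readpage.
 */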
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342static int cifs_readpages(struct file *file, struct address_space *mapping,
4343 struct list_head *page_list, unsigned num_pages)
4344{
Jeff Layton690c5e32011-10-19 15:30:16 -04004345 int rc;
4346 struct list_head tmplist;
4347 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04004348 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004349 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004350 pid_t pid;
Steve French0cb012d2018-10-11 01:01:02 -05004351 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352
Steve French0cb012d2018-10-11 01:01:02 -05004353 xid = get_xid();
Jeff Layton690c5e32011-10-19 15:30:16 -04004354 /*
Suresh Jayaraman566982362010-07-05 18:13:25 +05304355 * Reads as many pages as possible from fscache. Returns -ENOBUFS
4356 * immediately if the cookie is negative
David Howells54afa992013-09-04 17:10:39 +00004357 *
4358 * After this point, every page in the list might have PG_fscache set,
4359	 * so we will need to clear that from every page we don't use.
Suresh Jayaraman566982362010-07-05 18:13:25 +05304360 */
4361 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4362 &num_pages);
Steve French0cb012d2018-10-11 01:01:02 -05004363 if (rc == 0) {
4364 free_xid(xid);
Jeff Layton690c5e32011-10-19 15:30:16 -04004365 return rc;
Steve French0cb012d2018-10-11 01:01:02 -05004366 }
Suresh Jayaraman566982362010-07-05 18:13:25 +05304367
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004368 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4369 pid = open_file->pid;
4370 else
4371 pid = current->tgid;
4372
Jeff Layton690c5e32011-10-19 15:30:16 -04004373 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004374 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004375
Joe Perchesf96637b2013-05-04 22:12:25 -05004376 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4377 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04004378
4379 /*
4380 * Start with the page at end of list and move it to private
4381 * list. Do the same with any following pages until we hit
4382 * the rsize limit, hit an index discontinuity, or run out of
4383 * pages. Issue the async read and then start the loop again
4384 * until the list is empty.
4385 *
4386 * Note that list order is important. The page_list is in
4387 * the order of declining indexes. When we put the pages in
4388 * the rdata->pages, then we want them in increasing order.
4389 */
4390 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004391 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04004392 loff_t offset;
4393 struct page *page, *tpage;
4394 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004395 struct cifs_credits credits_on_stack;
4396 struct cifs_credits *credits = &credits_on_stack;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004397
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004398 if (open_file->invalidHandle) {
4399 rc = cifs_reopen_file(open_file, true);
4400 if (rc == -EAGAIN)
4401 continue;
4402 else if (rc)
4403 break;
4404 }
4405
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004406 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004407 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004408 if (rc)
4409 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410
Jeff Layton690c5e32011-10-19 15:30:16 -04004411 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004412 * Give up immediately if rsize is too small to read an entire
4413 * page. The VFS will fall back to readpage. We should never
4414 * reach this point however since we set ra_pages to 0 when the
4415 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04004416 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004417 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004418 add_credits_and_wake_if(server, credits, 0);
Steve French0cb012d2018-10-11 01:01:02 -05004419 free_xid(xid);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004420 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004423 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
4424 &nr_pages, &offset, &bytes);
4425 if (rc) {
4426 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04004428 }
4429
Jeff Layton0471ca32012-05-16 07:13:16 -04004430 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04004431 if (!rdata) {
4432 /* best to give up if we're out of mem */
4433 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4434 list_del(&page->lru);
4435 lru_cache_add_file(page);
4436 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004437 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04004438 }
4439 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004440 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04004441 break;
4442 }
4443
Jeff Layton6993f742012-05-16 07:13:17 -04004444 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04004445 rdata->mapping = mapping;
4446 rdata->offset = offset;
4447 rdata->bytes = bytes;
4448 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004449 rdata->pagesz = PAGE_SIZE;
Long Li1dbe3462018-05-30 12:47:55 -07004450 rdata->tailsz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07004451 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004452 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004453 rdata->credits = credits_on_stack;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004454
4455 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4456 list_del(&page->lru);
4457 rdata->pages[rdata->nr_pages++] = page;
4458 }
Jeff Layton690c5e32011-10-19 15:30:16 -04004459
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004460 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4461
4462 if (!rc) {
4463 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004464 rc = -EAGAIN;
4465 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004466 rc = server->ops->async_readv(rdata);
4467 }
4468
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004469 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004470 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004471 for (i = 0; i < rdata->nr_pages; i++) {
4472 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04004473 lru_cache_add_file(page);
4474 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004475 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04004477 /* Fallback to the readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04004478 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 break;
4480 }
Jeff Layton6993f742012-05-16 07:13:17 -04004481
4482 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483 }
4484
David Howells54afa992013-09-04 17:10:39 +00004485 /* Any pages that have been shown to fscache but didn't get added to
4486 * the pagecache must be uncached before they get returned to the
4487 * allocator.
4488 */
4489 cifs_fscache_readpages_cancel(mapping->host, page_list);
Steve French0cb012d2018-10-11 01:01:02 -05004490 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491 return rc;
4492}
4493
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01004494/*
4495 * cifs_readpage_worker must be called with the page pinned
4496 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497static int cifs_readpage_worker(struct file *file, struct page *page,
4498 loff_t *poffset)
4499{
4500 char *read_data;
4501 int rc;
4502
Suresh Jayaraman566982362010-07-05 18:13:25 +05304503 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05004504 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304505 if (rc == 0)
4506 goto read_complete;
4507
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508 read_data = kmap(page);
4509	/* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004510
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004511 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004512
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513 if (rc < 0)
4514 goto io_error;
4515 else
Joe Perchesf96637b2013-05-04 22:12:25 -05004516 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004517
Steve French9b9c5be2018-09-22 12:07:06 -05004518 /* we do not want atime to be less than mtime, it broke some apps */
4519 file_inode(file)->i_atime = current_time(file_inode(file));
4520 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4521 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4522 else
4523 file_inode(file)->i_atime = current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004524
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004525 if (PAGE_SIZE > rc)
4526 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527
4528 flush_dcache_page(page);
4529 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304530
4531 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05004532 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304533
Linus Torvalds1da177e2005-04-16 15:20:36 -07004534 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004535
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004537 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004538 unlock_page(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304539
4540read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004541 return rc;
4542}
4543
4544static int cifs_readpage(struct file *file, struct page *page)
4545{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004546 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004548 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004550 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004551
4552 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304553 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004554 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304555 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556 }
4557
Joe Perchesf96637b2013-05-04 22:12:25 -05004558 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00004559 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560
4561 rc = cifs_readpage_worker(file, page, &offset);
4562
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004563 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 return rc;
4565}
4566
Steve Frencha403a0a2007-07-26 15:54:16 +00004567static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4568{
4569 struct cifsFileInfo *open_file;
4570
Dave Wysochanskicb248812019-10-03 15:16:27 +10004571 spin_lock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004572 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004573 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Dave Wysochanskicb248812019-10-03 15:16:27 +10004574 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004575 return 1;
4576 }
4577 }
Dave Wysochanskicb248812019-10-03 15:16:27 +10004578 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004579 return 0;
4580}
4581
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582/* We do not want to update the file size from the server for inodes
4583   open for write - to avoid races with writepage extending
4584   the file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004585   refreshing the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586   but this is tricky to do without racing with writebehind
4587   page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004588bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589{
Steve Frencha403a0a2007-07-26 15:54:16 +00004590 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004591 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004592
Steve Frencha403a0a2007-07-26 15:54:16 +00004593 if (is_inode_writable(cifsInode)) {
4594 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004595 struct cifs_sb_info *cifs_sb;
4596
Steve Frenchc32a0b62006-01-12 14:41:28 -08004597 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004598 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004599			/* since there is no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004600			   we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004601 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004602 }
4603
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004604 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004605 return true;
Steve French7ba526312007-02-08 18:14:13 +00004606
Steve French4b18f2a2008-04-29 00:06:05 +00004607 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004608 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004609 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610}
4611
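/*
 * ->write_begin: return a locked page for the caller to copy into. The
 * page is read in first unless the write covers the whole page, or a
 * read oplock lets us zero the regions outside the write instead.
 */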
Nick Piggind9414772008-09-24 11:32:59 -04004612static int cifs_write_begin(struct file *file, struct address_space *mapping,
4613 loff_t pos, unsigned len, unsigned flags,
4614 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615{
Sachin Prabhu466bd312013-09-13 14:11:57 +01004616 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004617 pgoff_t index = pos >> PAGE_SHIFT;
4618 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004619 loff_t page_start = pos & PAGE_MASK;
4620 loff_t i_size;
4621 struct page *page;
4622 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623
Joe Perchesf96637b2013-05-04 22:12:25 -05004624 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04004625
Sachin Prabhu466bd312013-09-13 14:11:57 +01004626start:
Nick Piggin54566b22009-01-04 12:00:53 -08004627 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004628 if (!page) {
4629 rc = -ENOMEM;
4630 goto out;
4631 }
Nick Piggind9414772008-09-24 11:32:59 -04004632
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004633 if (PageUptodate(page))
4634 goto out;
Steve French8a236262007-03-06 00:31:00 +00004635
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004636 /*
4637 * If we write a full page it will be up to date, no need to read from
4638 * the server. If the write is short, we'll end up doing a sync write
4639 * instead.
4640 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004641 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004642 goto out;
4643
4644 /*
4645 * optimize away the read when we have an oplock, and we're not
4646 * expecting to use any of the data we'd be reading in. That
4647 * is, when the page lies beyond the EOF, or straddles the EOF
4648 * and the write will cover all of the existing data.
4649 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004650 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004651 i_size = i_size_read(mapping->host);
4652 if (page_start >= i_size ||
4653 (offset == 0 && (pos + len) >= i_size)) {
4654 zero_user_segments(page, 0, offset,
4655 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004656 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004657 /*
4658 * PageChecked means that the parts of the page
4659 * to which we're not writing are considered up
4660 * to date. Once the data is copied to the
4661 * page, it can be set uptodate.
4662 */
4663 SetPageChecked(page);
4664 goto out;
4665 }
4666 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667
Sachin Prabhu466bd312013-09-13 14:11:57 +01004668 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004669 /*
4670 * might as well read a page, it is fast enough. If we get
4671 * an error, we don't need to return it. cifs_write_end will
4672 * do a sync write instead since PG_uptodate isn't set.
4673 */
4674 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004675 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004676 oncethru = 1;
4677 goto start;
Steve French8a236262007-03-06 00:31:00 +00004678 } else {
4679 /* we could try using another file handle if there is one -
4680 but how would we lock it to prevent close of that handle
4681 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04004682		   this will be written out by write_end, so it is fine */
Steve French8a236262007-03-06 00:31:00 +00004683 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004684out:
4685 *pagep = page;
4686 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004687}
4688
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304689static int cifs_release_page(struct page *page, gfp_t gfp)
4690{
4691 if (PagePrivate(page))
4692 return 0;
4693
4694 return cifs_fscache_release_page(page, gfp);
4695}
4696
static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

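/*
 * launder_page writes a dirty page back synchronously before the VM
 * invalidates or migrates it. The writeback_control below describes a
 * WB_SYNC_ALL pass confined to exactly this page's byte range.
 */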
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

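/*
 * Worker for handling an oplock or lease break from the server: wait for
 * pending writers to drain, downgrade the cached oplock state, flush (and,
 * when required, purge) the page cache, push cached byte-range locks back
 * to the server, and finally acknowledge the break.
 */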
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	bool purge_cache = false;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
				      cfile->oplock_epoch, &purge_cache);

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (CIFS_CACHE_WRITE(cinode))
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now-incorrect file handle, is not a data
	 * integrity issue, but do not bother sending an oplock release if
	 * the session to the server is still disconnected, since the
	 * server has already released the oplock in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
	cifs_done_oplock_break(cinode);
}
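
/*
 * For reference, a sketch of how this worker is wired up, assuming the
 * mainline pattern: the open path initializes the work item, and the
 * demultiplex code queues it on the dedicated oplock workqueue when a
 * break arrives, taking a handle reference that the _cifsFileInfo_put()
 * above releases:
 *
 *	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
 *	...
 *	cifsFileInfo_get(cfile);
 *	queue_work(cifsoplockd_wq, &cfile->oplock_break);
 */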

/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags, which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests before they reach this point, so this method
 * should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}
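
/*
 * Why the stub above matters, sketched from the VFS open path in
 * fs/open.c (exact details vary by kernel version):
 *
 *	if (f->f_flags & O_DIRECT) {
 *		if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
 *			return -EINVAL;
 *	}
 *
 * Registering even an always-failing direct_IO handler is thus enough to
 * let O_DIRECT opens succeed; cache=none mounts never reach this method
 * because their reads and writes are shunted to the uncached paths first.
 */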

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
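
/*
 * For reference, the choice between the two tables above is made per inode,
 * roughly as in mainline cifs_set_ops() in fs/cifs/inode.c (illustrative,
 * trimmed to the relevant check):
 *
 *	if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
 *			PAGE_SIZE + MAX_CIFS_HDR_SIZE)
 *		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *	else
 *		inode->i_data.a_ops = &cifs_addr_ops;
 */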