blob: 97090693d18278045f5447fcf4de4cf162edaefb [file] [log] [blame]
/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Nikolay Borisovf86196e2019-01-03 15:29:02 -080036#include <linux/mm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <asm/div64.h>
38#include "cifsfs.h"
39#include "cifspdu.h"
40#include "cifsglob.h"
41#include "cifsproto.h"
42#include "cifs_unicode.h"
43#include "cifs_debug.h"
44#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053045#include "fscache.h"
Long Libd3dcc62017-11-22 17:38:47 -070046#include "smbdirect.h"
Steve French07b92d02013-02-18 10:34:26 -060047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048static inline int cifs_convert_flags(unsigned int flags)
49{
50 if ((flags & O_ACCMODE) == O_RDONLY)
51 return GENERIC_READ;
52 else if ((flags & O_ACCMODE) == O_WRONLY)
53 return GENERIC_WRITE;
54 else if ((flags & O_ACCMODE) == O_RDWR) {
55 /* GENERIC_ALL is too much permission to request
56 can cause unnecessary access denied on create */
57 /* return GENERIC_ALL; */
58 return (GENERIC_READ | GENERIC_WRITE);
59 }
60
Jeff Laytone10f7b52008-05-14 10:21:33 -070061 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
62 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
63 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000064}
Jeff Laytone10f7b52008-05-14 10:21:33 -070065
Jeff Layton608712f2010-10-15 15:33:56 -040066static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000067{
Jeff Layton608712f2010-10-15 15:33:56 -040068 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070069
Steve French7fc8f4e2009-02-23 20:43:11 +000070 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040071 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000072 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040073 posix_flags = SMB_O_WRONLY;
74 else if ((flags & O_ACCMODE) == O_RDWR)
75 posix_flags = SMB_O_RDWR;
76
Steve French07b92d02013-02-18 10:34:26 -060077 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040078 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060079 if (flags & O_EXCL)
80 posix_flags |= SMB_O_EXCL;
81 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050082 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
83 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060084
Jeff Layton608712f2010-10-15 15:33:56 -040085 if (flags & O_TRUNC)
86 posix_flags |= SMB_O_TRUNC;
87 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010088 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040089 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000090 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040091 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000092 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040093 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000094 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040095 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000096
97 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070098}
99
100static inline int cifs_get_disposition(unsigned int flags)
101{
102 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
103 return FILE_CREATE;
104 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
105 return FILE_OVERWRITE_IF;
106 else if ((flags & O_CREAT) == O_CREAT)
107 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000108 else if ((flags & O_TRUNC) == O_TRUNC)
109 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110 else
111 return FILE_OPEN;
112}
113
Jeff Layton608712f2010-10-15 15:33:56 -0400114int cifs_posix_open(char *full_path, struct inode **pinode,
115 struct super_block *sb, int mode, unsigned int f_flags,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400116 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
Jeff Layton608712f2010-10-15 15:33:56 -0400117{
118 int rc;
119 FILE_UNIX_BASIC_INFO *presp_data;
120 __u32 posix_flags = 0;
121 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
122 struct cifs_fattr fattr;
123 struct tcon_link *tlink;
Steve French96daf2b2011-05-27 04:34:02 +0000124 struct cifs_tcon *tcon;
Jeff Layton608712f2010-10-15 15:33:56 -0400125
Joe Perchesf96637b2013-05-04 22:12:25 -0500126 cifs_dbg(FYI, "posix open %s\n", full_path);
Jeff Layton608712f2010-10-15 15:33:56 -0400127
128 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
129 if (presp_data == NULL)
130 return -ENOMEM;
131
132 tlink = cifs_sb_tlink(cifs_sb);
133 if (IS_ERR(tlink)) {
134 rc = PTR_ERR(tlink);
135 goto posix_open_ret;
136 }
137
138 tcon = tlink_tcon(tlink);
139 mode &= ~current_umask();
140
141 posix_flags = cifs_posix_convert_flags(f_flags);
142 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
143 poplock, full_path, cifs_sb->local_nls,
Nakajima Akirabc8ebdc42015-02-13 15:35:58 +0900144 cifs_remap(cifs_sb));
Jeff Layton608712f2010-10-15 15:33:56 -0400145 cifs_put_tlink(tlink);
146
147 if (rc)
148 goto posix_open_ret;
149
150 if (presp_data->Type == cpu_to_le32(-1))
151 goto posix_open_ret; /* open ok, caller does qpathinfo */
152
153 if (!pinode)
154 goto posix_open_ret; /* caller does not need info */
155
156 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
157
158 /* get new inode and set it up */
159 if (*pinode == NULL) {
160 cifs_fill_uniqueid(sb, &fattr);
161 *pinode = cifs_iget(sb, &fattr);
162 if (!*pinode) {
163 rc = -ENOMEM;
164 goto posix_open_ret;
165 }
166 } else {
167 cifs_fattr_to_inode(*pinode, &fattr);
168 }
169
170posix_open_ret:
171 kfree(presp_data);
172 return rc;
173}
174
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300175static int
176cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700177 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
178 struct cifs_fid *fid, unsigned int xid)
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300179{
180 int rc;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700181 int desired_access;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300182 int disposition;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500183 int create_options = CREATE_NOT_DIR;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300184 FILE_ALL_INFO *buf;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700185 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400186 struct cifs_open_parms oparms;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300187
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700188 if (!server->ops->open)
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700189 return -ENOSYS;
190
191 desired_access = cifs_convert_flags(f_flags);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300192
193/*********************************************************************
194 * open flag mapping table:
195 *
196 * POSIX Flag CIFS Disposition
197 * ---------- ----------------
198 * O_CREAT FILE_OPEN_IF
199 * O_CREAT | O_EXCL FILE_CREATE
200 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
201 * O_TRUNC FILE_OVERWRITE
202 * none of the above FILE_OPEN
203 *
204 * Note that there is not a direct match between disposition
205 * FILE_SUPERSEDE (ie create whether or not file exists although
206 * O_CREAT | O_TRUNC is similar but truncates the existing
207 * file rather than creating a new file as FILE_SUPERSEDE does
208 * (which uses the attributes / metadata passed in on open call)
209 *?
210 *? O_SYNC is a reasonable match to CIFS writethrough flag
211 *? and the read write flags match reasonably. O_LARGEFILE
212 *? is irrelevant because largefile support is always used
213 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
214 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
215 *********************************************************************/
216
217 disposition = cifs_get_disposition(f_flags);
218
219 /* BB pass O_SYNC flag through on file attributes .. BB */
220
221 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
222 if (!buf)
223 return -ENOMEM;
224
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500225 if (backup_cred(cifs_sb))
226 create_options |= CREATE_OPEN_BACKUP_INTENT;
227
Steve French1013e762017-09-22 01:40:27 -0500228 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
229 if (f_flags & O_SYNC)
230 create_options |= CREATE_WRITE_THROUGH;
231
232 if (f_flags & O_DIRECT)
233 create_options |= CREATE_NO_BUFFER;
234
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400235 oparms.tcon = tcon;
236 oparms.cifs_sb = cifs_sb;
237 oparms.desired_access = desired_access;
238 oparms.create_options = create_options;
239 oparms.disposition = disposition;
240 oparms.path = full_path;
241 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400242 oparms.reconnect = false;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400243
244 rc = server->ops->open(xid, &oparms, oplock, buf);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300245
246 if (rc)
247 goto out;
248
249 if (tcon->unix_ext)
250 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
251 xid);
252 else
253 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
Steve French42eacf92014-02-10 14:08:16 -0600254 xid, fid);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300255
256out:
257 kfree(buf);
258 return rc;
259}
260
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400261static bool
262cifs_has_mand_locks(struct cifsInodeInfo *cinode)
263{
264 struct cifs_fid_locks *cur;
265 bool has_locks = false;
266
267 down_read(&cinode->lock_sem);
268 list_for_each_entry(cur, &cinode->llist, llist) {
269 if (!list_empty(&cur->locks)) {
270 has_locks = true;
271 break;
272 }
273 }
274 up_read(&cinode->lock_sem);
275 return has_locks;
276}
277
Jeff Layton15ecb432010-10-15 15:34:02 -0400278struct cifsFileInfo *
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700279cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
Jeff Layton15ecb432010-10-15 15:34:02 -0400280 struct tcon_link *tlink, __u32 oplock)
281{
Goldwyn Rodrigues1f1735c2016-04-18 06:41:52 -0500282 struct dentry *dentry = file_dentry(file);
David Howells2b0143b2015-03-17 22:25:59 +0000283 struct inode *inode = d_inode(dentry);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700284 struct cifsInodeInfo *cinode = CIFS_I(inode);
285 struct cifsFileInfo *cfile;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700286 struct cifs_fid_locks *fdlocks;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700287 struct cifs_tcon *tcon = tlink_tcon(tlink);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400288 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Layton15ecb432010-10-15 15:34:02 -0400289
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700290 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
291 if (cfile == NULL)
292 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400293
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700294 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
295 if (!fdlocks) {
296 kfree(cfile);
297 return NULL;
298 }
299
300 INIT_LIST_HEAD(&fdlocks->locks);
301 fdlocks->cfile = cfile;
302 cfile->llist = fdlocks;
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700303 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700304 list_add(&fdlocks->llist, &cinode->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700305 up_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700306
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700307 cfile->count = 1;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700308 cfile->pid = current->tgid;
309 cfile->uid = current_fsuid();
310 cfile->dentry = dget(dentry);
311 cfile->f_flags = file->f_flags;
312 cfile->invalidHandle = false;
313 cfile->tlink = cifs_get_tlink(tlink);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700314 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700315 mutex_init(&cfile->fh_mutex);
Steve French3afca262016-09-22 18:58:16 -0500316 spin_lock_init(&cfile->file_info_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400317
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100318 cifs_sb_active(inode->i_sb);
319
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400320 /*
321 * If the server returned a read oplock and we have mandatory brlocks,
322 * set oplock level to None.
323 */
Pavel Shilovsky53ef1012013-09-05 16:11:28 +0400324 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500325 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400326 oplock = 0;
327 }
328
Steve French3afca262016-09-22 18:58:16 -0500329 spin_lock(&tcon->open_file_lock);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400330 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700331 oplock = fid->pending_open->oplock;
332 list_del(&fid->pending_open->olist);
333
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400334 fid->purge_cache = false;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400335 server->ops->set_fid(cfile, fid, oplock);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700336
337 list_add(&cfile->tlist, &tcon->openFileList);
Steve Frenchfae80442018-10-19 17:14:32 -0500338 atomic_inc(&tcon->num_local_opens);
Steve French3afca262016-09-22 18:58:16 -0500339
Jeff Layton15ecb432010-10-15 15:34:02 -0400340 /* if readable file instance put first in list*/
Ronnie Sahlberg487317c2019-06-05 10:38:38 +1000341 spin_lock(&cinode->open_file_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400342 if (file->f_mode & FMODE_READ)
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700343 list_add(&cfile->flist, &cinode->openFileList);
Jeff Layton15ecb432010-10-15 15:34:02 -0400344 else
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700345 list_add_tail(&cfile->flist, &cinode->openFileList);
Ronnie Sahlberg487317c2019-06-05 10:38:38 +1000346 spin_unlock(&cinode->open_file_lock);
Steve French3afca262016-09-22 18:58:16 -0500347 spin_unlock(&tcon->open_file_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400348
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400349 if (fid->purge_cache)
Jeff Layton4f73c7d2014-04-30 09:31:47 -0400350 cifs_zap_mapping(inode);
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400351
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700352 file->private_data = cfile;
353 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400354}
355
Jeff Layton764a1b12012-07-25 14:59:54 -0400356struct cifsFileInfo *
357cifsFileInfo_get(struct cifsFileInfo *cifs_file)
358{
Steve French3afca262016-09-22 18:58:16 -0500359 spin_lock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400360 cifsFileInfo_get_locked(cifs_file);
Steve French3afca262016-09-22 18:58:16 -0500361 spin_unlock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400362 return cifs_file;
363}
364
Aurelien Aptelb98749c2019-03-29 10:49:12 +0100365/**
366 * cifsFileInfo_put - release a reference of file priv data
367 *
368 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
Steve Frenchcdff08e2010-10-21 22:46:14 +0000369 */
Jeff Laytonb33879a2010-10-15 15:34:04 -0400370void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
371{
Aurelien Aptelb98749c2019-03-29 10:49:12 +0100372 _cifsFileInfo_put(cifs_file, true);
373}
374
375/**
376 * _cifsFileInfo_put - release a reference of file priv data
377 *
378 * This may involve closing the filehandle @cifs_file out on the
379 * server. Must be called without holding tcon->open_file_lock and
380 * cifs_file->file_info_lock.
381 *
382 * If @wait_for_oplock_handler is true and we are releasing the last
383 * reference, wait for any running oplock break handler of the file
384 * and cancel any pending one. If calling this function from the
385 * oplock break handler, you need to pass false.
386 *
387 */
388void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
389{
David Howells2b0143b2015-03-17 22:25:59 +0000390 struct inode *inode = d_inode(cifs_file->dentry);
Steve French96daf2b2011-05-27 04:34:02 +0000391 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700392 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300393 struct cifsInodeInfo *cifsi = CIFS_I(inode);
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100394 struct super_block *sb = inode->i_sb;
395 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000396 struct cifsLockInfo *li, *tmp;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700397 struct cifs_fid fid;
398 struct cifs_pending_open open;
Sachin Prabhuca7df8e2015-01-15 12:22:04 +0000399 bool oplock_break_cancelled;
Steve Frenchcdff08e2010-10-21 22:46:14 +0000400
Steve French3afca262016-09-22 18:58:16 -0500401 spin_lock(&tcon->open_file_lock);
402
403 spin_lock(&cifs_file->file_info_lock);
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400404 if (--cifs_file->count > 0) {
Steve French3afca262016-09-22 18:58:16 -0500405 spin_unlock(&cifs_file->file_info_lock);
406 spin_unlock(&tcon->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000407 return;
Jeff Laytonb33879a2010-10-15 15:34:04 -0400408 }
Steve French3afca262016-09-22 18:58:16 -0500409 spin_unlock(&cifs_file->file_info_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000410
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700411 if (server->ops->get_lease_key)
412 server->ops->get_lease_key(inode, &fid);
413
414 /* store open in pending opens to make sure we don't miss lease break */
415 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
416
Steve Frenchcdff08e2010-10-21 22:46:14 +0000417 /* remove it from the lists */
Ronnie Sahlberg487317c2019-06-05 10:38:38 +1000418 spin_lock(&cifsi->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000419 list_del(&cifs_file->flist);
Ronnie Sahlberg487317c2019-06-05 10:38:38 +1000420 spin_unlock(&cifsi->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000421 list_del(&cifs_file->tlist);
Steve Frenchfae80442018-10-19 17:14:32 -0500422 atomic_dec(&tcon->num_local_opens);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000423
424 if (list_empty(&cifsi->openFileList)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500425 cifs_dbg(FYI, "closing last open instance for inode %p\n",
David Howells2b0143b2015-03-17 22:25:59 +0000426 d_inode(cifs_file->dentry));
Pavel Shilovsky25364132012-09-18 16:20:27 -0700427 /*
428 * In strict cache mode we need invalidate mapping on the last
429 * close because it may cause a error when we open this file
430 * again and get at least level II oplock.
431 */
Pavel Shilovsky4f8ba8a2010-11-21 22:36:12 +0300432 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
Jeff Laytonaff8d5c2014-04-30 09:31:45 -0400433 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
Pavel Shilovskyc6723622010-11-03 10:58:57 +0300434 cifs_set_oplock_level(cifsi, 0);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000435 }
Steve French3afca262016-09-22 18:58:16 -0500436
437 spin_unlock(&tcon->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000438
Aurelien Aptelb98749c2019-03-29 10:49:12 +0100439 oplock_break_cancelled = wait_oplock_handler ?
440 cancel_work_sync(&cifs_file->oplock_break) : false;
Jeff Laytonad635942011-07-26 12:20:17 -0400441
Steve Frenchcdff08e2010-10-21 22:46:14 +0000442 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700443 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400444 unsigned int xid;
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700445
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400446 xid = get_xid();
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700447 if (server->ops->close)
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +0400448 server->ops->close(xid, tcon, &cifs_file->fid);
449 _free_xid(xid);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000450 }
451
Sachin Prabhuca7df8e2015-01-15 12:22:04 +0000452 if (oplock_break_cancelled)
453 cifs_done_oplock_break(cifsi);
454
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700455 cifs_del_pending_open(&open);
456
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700457 /*
458 * Delete any outstanding lock records. We'll lose them when the file
Steve Frenchcdff08e2010-10-21 22:46:14 +0000459 * is closed anyway.
460 */
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700461 down_write(&cifsi->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700462 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
Steve Frenchcdff08e2010-10-21 22:46:14 +0000463 list_del(&li->llist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400464 cifs_del_lock_waiters(li);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000465 kfree(li);
466 }
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700467 list_del(&cifs_file->llist->llist);
468 kfree(cifs_file->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700469 up_write(&cifsi->lock_sem);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000470
471 cifs_put_tlink(cifs_file->tlink);
472 dput(cifs_file->dentry);
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100473 cifs_sb_deactive(sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000474 kfree(cifs_file);
Jeff Laytonb33879a2010-10-15 15:34:04 -0400475}
476
Linus Torvalds1da177e2005-04-16 15:20:36 -0700477int cifs_open(struct inode *inode, struct file *file)
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700478
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479{
480 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400481 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400482 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700484 struct TCP_Server_Info *server;
Steve French96daf2b2011-05-27 04:34:02 +0000485 struct cifs_tcon *tcon;
Jeff Layton7ffec372010-09-29 19:51:11 -0400486 struct tcon_link *tlink;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700487 struct cifsFileInfo *cfile = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700488 char *full_path = NULL;
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300489 bool posix_open_ok = false;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700490 struct cifs_fid fid;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700491 struct cifs_pending_open open;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400493 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494
495 cifs_sb = CIFS_SB(inode->i_sb);
Jeff Layton7ffec372010-09-29 19:51:11 -0400496 tlink = cifs_sb_tlink(cifs_sb);
497 if (IS_ERR(tlink)) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400498 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400499 return PTR_ERR(tlink);
500 }
501 tcon = tlink_tcon(tlink);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700502 server = tcon->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503
Goldwyn Rodrigues1f1735c2016-04-18 06:41:52 -0500504 full_path = build_path_from_dentry(file_dentry(file));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 if (full_path == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530506 rc = -ENOMEM;
Jeff Layton232341b2010-08-05 13:58:38 -0400507 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508 }
509
Joe Perchesf96637b2013-05-04 22:12:25 -0500510 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +0000511 inode, file->f_flags, full_path);
Steve French276a74a2009-03-03 18:00:34 +0000512
Namjae Jeon787aded2014-08-22 14:22:51 +0900513 if (file->f_flags & O_DIRECT &&
514 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
515 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
516 file->f_op = &cifs_file_direct_nobrl_ops;
517 else
518 file->f_op = &cifs_file_direct_ops;
519 }
520
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700521 if (server->oplocks)
Steve French276a74a2009-03-03 18:00:34 +0000522 oplock = REQ_OPLOCK;
523 else
524 oplock = 0;
525
Steve French64cc2c62009-03-04 19:54:08 +0000526 if (!tcon->broken_posix_open && tcon->unix_ext &&
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400527 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
528 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Steve French276a74a2009-03-03 18:00:34 +0000529 /* can not refresh inode info since size could be stale */
Jeff Layton2422f672010-06-16 13:40:16 -0400530 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
Steve Frenchfa588e02010-04-22 19:21:55 +0000531 cifs_sb->mnt_file_mode /* ignored */,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700532 file->f_flags, &oplock, &fid.netfid, xid);
Steve French276a74a2009-03-03 18:00:34 +0000533 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500534 cifs_dbg(FYI, "posix open succeeded\n");
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300535 posix_open_ok = true;
Steve French64cc2c62009-03-04 19:54:08 +0000536 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
537 if (tcon->ses->serverNOS)
Joe Perchesf96637b2013-05-04 22:12:25 -0500538 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
539 tcon->ses->serverName,
540 tcon->ses->serverNOS);
Steve French64cc2c62009-03-04 19:54:08 +0000541 tcon->broken_posix_open = true;
Steve French276a74a2009-03-03 18:00:34 +0000542 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
543 (rc != -EOPNOTSUPP)) /* path not found or net err */
544 goto out;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700545 /*
546 * Else fallthrough to retry open the old way on network i/o
547 * or DFS errors.
548 */
Steve French276a74a2009-03-03 18:00:34 +0000549 }
550
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700551 if (server->ops->get_lease_key)
552 server->ops->get_lease_key(inode, &fid);
553
554 cifs_add_pending_open(&fid, tlink, &open);
555
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300556 if (!posix_open_ok) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700557 if (server->ops->get_lease_key)
558 server->ops->get_lease_key(inode, &fid);
559
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300560 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700561 file->f_flags, &oplock, &fid, xid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700562 if (rc) {
563 cifs_del_pending_open(&open);
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300564 goto out;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700565 }
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300566 }
Jeff Layton47c78b72010-06-16 13:40:17 -0400567
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700568 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
569 if (cfile == NULL) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700570 if (server->ops->close)
571 server->ops->close(xid, tcon, &fid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700572 cifs_del_pending_open(&open);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 rc = -ENOMEM;
574 goto out;
575 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +0530577 cifs_fscache_set_inode_cookie(inode, file);
578
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300579 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700580 /*
581 * Time to set mode which we can not set earlier due to
582 * problems creating new read-only files.
583 */
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300584 struct cifs_unix_set_info_args args = {
585 .mode = inode->i_mode,
Eric W. Biederman49418b22013-02-06 00:57:56 -0800586 .uid = INVALID_UID, /* no change */
587 .gid = INVALID_GID, /* no change */
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300588 .ctime = NO_CHANGE_64,
589 .atime = NO_CHANGE_64,
590 .mtime = NO_CHANGE_64,
591 .device = 0,
592 };
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700593 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
594 cfile->pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595 }
596
597out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400599 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400600 cifs_put_tlink(tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601 return rc;
602}
603
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400604static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
605
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/*
	 * Nested lockdep annotation: the reconnect path that calls us may
	 * already hold another inode's lock_sem.
	 */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	/*
	 * Push POSIX-style locks when the server advertises the unix fcntl
	 * capability and the mount does not forbid posix brlocks; otherwise
	 * fall back to the protocol's mandatory byte-range locks.
	 */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
635
/*
 * Reopen an open file whose handle has become invalid (typically after a
 * session reconnect).  If @can_flush is true, write out dirty pages and
 * refresh the inode metadata from the server after the reopen; callers in
 * the writeback path pass false to avoid deadlocking on their own flush.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else reopened the handle while we waited */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			/* ensure the relock step below runs */
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		/* do not latch a transient interrupt error on the mapping */
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
794
795int cifs_close(struct inode *inode, struct file *file)
796{
Jeff Layton77970692011-04-05 16:23:47 -0700797 if (file->private_data != NULL) {
798 cifsFileInfo_put(file->private_data);
799 file->private_data = NULL;
800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801
Steve Frenchcdff08e2010-10-21 22:46:14 +0000802 /* return code from the ->release op is always ignored */
803 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804}
805
/*
 * Walk every file open on this tree connection and reopen any whose handle
 * was invalidated, relying on the tcon's persistent-handle support.  No-op
 * unless persistent handles are in use and a reopen is pending.
 */
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		/*
		 * Take a reference and collect on a private list so the
		 * actual reopen (which can block on the network) happens
		 * after the spinlock is dropped.
		 */
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		/* on failure, arm need_reopen_files so we try again later */
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
841
/*
 * ->release handler for directories: close the server-side search handle
 * if still needed, free the cached network search buffer, and release the
 * private data attached to the directory file.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		/* mark invalid before dropping the lock and going on the wire */
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* release the raw SMB response buffer cached by the search */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
892
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400893static struct cifsLockInfo *
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000894cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000895{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400896 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000897 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400898 if (!lock)
899 return lock;
900 lock->offset = offset;
901 lock->length = length;
902 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400903 lock->pid = current->tgid;
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000904 lock->flags = flags;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400905 INIT_LIST_HEAD(&lock->blist);
906 init_waitqueue_head(&lock->block_q);
907 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400908}
909
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700910void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400911cifs_del_lock_waiters(struct cifsLockInfo *lock)
912{
913 struct cifsLockInfo *li, *tmp;
914 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
915 list_del_init(&li->blist);
916 wake_up(&li->block_q);
917 }
918}
919
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400920#define CIFS_LOCK_OP 0
921#define CIFS_READ_OP 1
922#define CIFS_WRITE_OP 2
923
/*
 * Scan one fid's cached lock list for a lock conflicting with the
 * [offset, offset+length) range of the requested @type lock.
 * @rw_check: CIFS_LOCK_OP - lock request, CIFS_READ_OP - read,
 *            CIFS_WRITE_OP - write.
 * On conflict, stores the conflicting lock in *@conf_lock (if non-NULL)
 * and returns true.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip locks whose range does not overlap the request */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		/*
		 * For read/write checks, our own lock (same tgid, same fid)
		 * is not a conflict...
		 */
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* shared locks are compatible with each other / our own fid */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		/* OFD locks taken through the same fid do not conflict */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
960
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700961bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300962cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000963 __u8 type, __u16 flags,
964 struct cifsLockInfo **conf_lock, int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400965{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300966 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700967 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +0000968 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300969
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700970 list_for_each_entry(cur, &cinode->llist, llist) {
971 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000972 flags, cfile, conf_lock,
973 rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300974 if (rc)
975 break;
976 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300977
978 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400979}
980
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->fl_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's range/owner back via flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
1019
/* Append a byte-range lock to this fid's cached lock list under lock_sem. */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
1028
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed - record lock locally only */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blist and sleep
		 * until cifs_del_lock_waiters() detaches us (blist becomes
		 * self-linked), then retry the whole check.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: unhook ourselves from the waiter list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1076
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	/* non-POSIX locks always need to be sent to the server */
	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	/*
	 * No local conflict found but we cannot cache brlocks: restore the
	 * caller's lock type and ask it to query the server.
	 */
	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
1105
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* cannot cache - the caller must send the lock to the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/* blocked by another lock: wait for the blocker, then retry */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
		if (!rc)
			goto try_again;
		/* interrupted - remove ourselves from the blocked list */
		locks_delete_block(flock);
	}
	return rc;
}
1138
/*
 * Send all locally-cached mandatory byte-range locks for @cfile to the
 * server, batching them into LOCKING_ANDX_RANGE arrays (one pass for
 * exclusive locks, one for shared).  Returns 0 or the last non-zero rc
 * from cifs_lockv.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	/* pass 0: exclusive large-file locks, pass 1: shared large-file locks */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	/* cap the batch buffer at one page */
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			/* marshal one lock into wire (little-endian) format */
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch to the server */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			/* flush the final partial batch for this lock type */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1215
Jeff Layton3d224622016-05-24 06:27:44 -04001216static __u32
1217hash_lockowner(fl_owner_t owner)
1218{
1219 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1220}
1221
/*
 * Snapshot of one cached POSIX byte-range lock, queued on a local list in
 * cifs_push_posix_locks() so it can be sent to the server after dropping
 * the flc_lock spinlock.
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the locks_to_send list */
	__u64 offset;		/* start of the locked byte range */
	__u64 length;		/* length of the locked byte range */
	__u32 pid;		/* lock owner id (presumably sent on the wire) */
	__u16 netfid;		/* file handle the lock belongs to */
	__u8 type;		/* lock type (read/write) */
};
1230
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001231static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001232cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001233{
David Howells2b0143b2015-03-17 22:25:59 +00001234 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001235 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001236 struct file_lock *flock;
1237 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001238 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001239 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001240 struct list_head locks_to_send, *el;
1241 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001242 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001243
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001244 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001245
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001246 if (!flctx)
1247 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001248
Jeff Laytone084c1b2015-02-16 14:32:03 -05001249 spin_lock(&flctx->flc_lock);
1250 list_for_each(el, &flctx->flc_posix) {
1251 count++;
1252 }
1253 spin_unlock(&flctx->flc_lock);
1254
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001255 INIT_LIST_HEAD(&locks_to_send);
1256
1257 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001258 * Allocating count locks is enough because no FL_POSIX locks can be
1259 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001260 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001261 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001262 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001263 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1264 if (!lck) {
1265 rc = -ENOMEM;
1266 goto err_out;
1267 }
1268 list_add_tail(&lck->llist, &locks_to_send);
1269 }
1270
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001271 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001272 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001273 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001274 if (el == &locks_to_send) {
1275 /*
1276 * The list ended. We don't have enough allocated
1277 * structures - something is really wrong.
1278 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001279 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001280 break;
1281 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001282 length = 1 + flock->fl_end - flock->fl_start;
1283 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1284 type = CIFS_RDLCK;
1285 else
1286 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001287 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001288 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001289 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001290 lck->length = length;
1291 lck->type = type;
1292 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001293 }
Jeff Layton6109c852015-01-16 15:05:57 -05001294 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001295
1296 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001297 int stored_rc;
1298
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001299 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001300 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001301 lck->type, 0);
1302 if (stored_rc)
1303 rc = stored_rc;
1304 list_del(&lck->llist);
1305 kfree(lck);
1306 }
1307
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001308out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001309 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001310 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001311err_out:
1312 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1313 list_del(&lck->llist);
1314 kfree(lck);
1315 }
1316 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001317}
1318
1319static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001320cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001321{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001322 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001323 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001324 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001325 int rc = 0;
1326
1327 /* we are going to update can_cache_brlcks here - need a write access */
1328 down_write(&cinode->lock_sem);
1329 if (!cinode->can_cache_brlcks) {
1330 up_write(&cinode->lock_sem);
1331 return rc;
1332 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001333
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001334 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001335 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1336 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001337 rc = cifs_push_posix_locks(cfile);
1338 else
1339 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001340
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001341 cinode->can_cache_brlcks = false;
1342 up_write(&cinode->lock_sem);
1343 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001344}
1345
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001346static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001347cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001348 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001350 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001351 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001352 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001353 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001354 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001355 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001356 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001358 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001359 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001360 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001361 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001362 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001363 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001364 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001365 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001367 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001368 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001369 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001370 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001371 *lock = 1;
1372 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001373 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001374 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001375 *unlock = 1;
1376 /* Check if unlock includes more than one lock range */
1377 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001378 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001379 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001380 *lock = 1;
1381 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001382 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001383 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001384 *lock = 1;
1385 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001386 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001387 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001388 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001390 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001391}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392
/*
 * Handle an F_GETLK-style query: determine whether the range described by
 * @flock could be locked. Checks locally cached locks first, then probes
 * the server by taking and immediately dropping the lock. On conflict the
 * blocking lock type is reported back through flock->fl_type; a successful
 * probe sets it to F_UNLCK.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* a local conflict answers the query without a round trip */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* mandatory-lock path: consult the cached lock list first */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* probe succeeded - undo it and report the range as free */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* even a shared probe failed - an exclusive lock blocks us */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed; retry shared to distinguish the blocker */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1461
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001462void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001463cifs_move_llist(struct list_head *source, struct list_head *dest)
1464{
1465 struct list_head *li, *tmp;
1466 list_for_each_safe(li, tmp, source)
1467 list_move(li, dest);
1468}
1469
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001470void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001471cifs_free_llist(struct list_head *llist)
1472{
1473 struct cifsLockInfo *li, *tmp;
1474 list_for_each_entry_safe(li, tmp, llist, llist) {
1475 cifs_del_lock_waiters(li);
1476 list_del(&li->llist);
1477 kfree(li);
1478 }
1479}
1480
/*
 * Unlock every cached byte-range lock of @cfile that lies entirely inside
 * the range described by @flock. Matching locks are batched into
 * LOCKING_ANDX_RANGE buffers (one wire request per full buffer, per lock
 * type). Locks are parked on a temporary list so they can be restored to
 * the file's list if the server rejects the unlock, keeping local state
 * consistent with the server's.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* index 0: exclusive locks, index 1: shared locks */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* how many lock ranges fit into one request buffer */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* skip locks not fully contained in the range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			/* only unlock ranges owned by this process */
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* buffer full - flush this batch now */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* send the final, partially filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1593
/*
 * Apply a lock or unlock request (the F_SETLK/F_SETLKW path). POSIX-capable
 * servers get a CIFSSMBPosixLock call; otherwise the request goes through
 * the server's mandatory-lock ops, with the lock first validated/recorded
 * locally via cifs_lock_add_if(). For FL_POSIX requests the local VFS lock
 * state is updated at the end so it mirrors what was sent to the server.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		/* try to satisfy or cache the request locally first */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->fl_flags);
		if (!lock)
			return -ENOMEM;

		/* rc == 1 means the lock was handled locally (cached) */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted it - record the lock on the file's list */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
1687
1688int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1689{
1690 int rc, xid;
1691 int lock = 0, unlock = 0;
1692 bool wait_flag = false;
1693 bool posix_lck = false;
1694 struct cifs_sb_info *cifs_sb;
1695 struct cifs_tcon *tcon;
1696 struct cifsInodeInfo *cinode;
1697 struct cifsFileInfo *cfile;
1698 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001699 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001700
1701 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001702 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001703
Joe Perchesf96637b2013-05-04 22:12:25 -05001704 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1705 cmd, flock->fl_flags, flock->fl_type,
1706 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001707
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001708 cfile = (struct cifsFileInfo *)file->private_data;
1709 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001710
1711 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1712 tcon->ses->server);
Al Viro7119e222014-10-22 00:25:12 -04001713 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001714 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001715 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001716
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001717 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001718 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1719 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1720 posix_lck = true;
1721 /*
1722 * BB add code here to normalize offset and length to account for
1723 * negative length which we can not accept over the wire.
1724 */
1725 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001726 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001727 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001728 return rc;
1729 }
1730
1731 if (!lock && !unlock) {
1732 /*
1733 * if no lock or unlock then nothing to do since we do not
1734 * know what it is
1735 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001736 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001737 return -EOPNOTSUPP;
1738 }
1739
1740 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1741 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001742 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 return rc;
1744}
1745
Jeff Layton597b0272012-03-23 14:40:56 -04001746/*
1747 * update the file size (if needed) after a write. Should be called with
1748 * the inode->i_lock held
1749 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001750void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001751cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1752 unsigned int bytes_written)
1753{
1754 loff_t end_of_write = offset + bytes_written;
1755
1756 if (end_of_write > cifsi->server_eof)
1757 cifsi->server_eof = end_of_write;
1758}
1759
/*
 * Synchronously write @write_size bytes from @write_data to the server at
 * *@offset through @open_file, chunking by the server's write-retry size
 * and retrying -EAGAIN (reopening an invalidated handle as needed).
 * Advances *@offset and updates the cached server EOF and i_size as data
 * lands. Returns the number of bytes written, or a negative error only if
 * nothing was written at all.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* bound each request by the server's retry size */
			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				/* nothing written at all - report the error */
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1844
/*
 * Find an open, non-stale handle on this inode that was opened with read
 * access, take a reference on it, and return it.  Returns NULL if no usable
 * readable handle exists.  On multiuser mounts, @fsuid_only restricts the
 * search to handles owned by the current fsuid.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001879
/* Return -EBADF if no handle is found and general rc otherwise */
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;

	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen but we had reports of an oops (due
	 * to it being zero) during stress testcases so we need to check for it
	 */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	/* give up after MAX_REOPEN_ATT reopen attempts of stale handles */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass: prefer a handle opened by this thread group */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				/* remember first stale handle as a fallback
				   candidate for reopening below */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		/* try to revive the stale handle outside the spinlock */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		/* reopen failed: push it to the list tail so the next scan
		   considers other handles first, then retry the search */
		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&tcon->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
1969
1970struct cifsFileInfo *
1971find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
1972{
1973 struct cifsFileInfo *cfile;
1974 int rc;
1975
1976 rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
1977 if (rc)
1978 cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
1979
1980 return cfile;
Steve French6148a742005-10-05 12:23:19 -07001981}
1982
/*
 * Write back the byte range [from, to) of @page to the server using any
 * available writable handle on the inode.  Returns 0 on success, a negative
 * errno otherwise; returns 0 without writing if the page lies beyond EOF
 * (racing truncate), and clamps @to so the write never extends the file.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* sanity-check the requested range before touching the server */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
	if (!rc) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
		else
			rc = -EFAULT;
	} else {
		cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
		/* retryable errors (e.g. reconnect in progress) are passed
		   through so the caller can redirty; others become -EIO */
		if (!is_retryable_error(rc))
			rc = -EIO;
	}

	kunmap(page);
	return rc;
}
2039
/*
 * Allocate a cifs_writedata able to hold up to @tofind pages and fill its
 * page array with up to @tofind dirty pages from @mapping in [*index, end].
 * On return *found_pages holds how many pages were grabbed (each with a
 * reference taken by find_get_pages_range_tag) and *index is advanced past
 * them.  Returns NULL on allocation failure.
 */
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	*found_pages = find_get_pages_range_tag(mapping, index, end,
				PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
	return wdata;
}
2056
/*
 * Lock and prepare for writeback a maximal run of consecutive pages from the
 * @found_pages pages previously gathered into @wdata.  Stops at the first
 * page that is non-consecutive, already under writeback, no longer dirty,
 * beyond @end, or past EOF.  Pages that make it into the run are left locked
 * with PG_writeback set; leftover pages have their references dropped.
 * Returns the number of prepared pages (0 if even the first page was
 * unusable, in which case *index is reset so the caller rescans it).
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */

		/* block on the first page; after that, never wait, so the
		   batch stays contiguous without stalling writeback */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2135
/*
 * Fill in the remaining cifs_writedata fields (offset/length/pid/sync mode)
 * for the prepared page run and dispatch it as an asynchronous write.
 * Returns 0 if the async write was issued, -EAGAIN if the cached handle is
 * stale (caller retries/redirties), or another negative rc on failure.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc;
	struct TCP_Server_Info *server =
				tlink_tcon(wdata->cfile->tlink)->ses->server;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* the final page may be partial: only write up to EOF */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
	wdata->pid = wdata->cfile->pid;

	/* trim the reserved credits down to what this write actually needs */
	rc = adjust_credits(server, &wdata->credits, wdata->bytes);
	if (rc)
		return rc;

	if (wdata->cfile->invalidHandle)
		rc = -EAGAIN;
	else
		rc = server->ops->async_writev(wdata, cifs_writedata_release);

	return rc;
}
2165
/*
 * ->writepages() address_space operation: push dirty pages for @mapping to
 * the server in wsize-sized batches of consecutive pages via async writes.
 * Falls back to generic_writepages() (one page at a time through
 * cifs_writepage) when the negotiated wsize is smaller than a page.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct cifsFileInfo *cfile = NULL;
	int rc = 0;
	int saved_rc = 0;	/* first non-fatal error, reported at the end */
	unsigned int xid;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	xid = get_xid();
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize;
		pgoff_t next = 0, tofind, saved_index = index;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;
		int get_file_rc = 0;

		/* drop the handle reference from the previous iteration */
		if (cfile)
			cifsFileInfo_put(cfile);

		rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);

		/* in case of an error store it to return later */
		if (rc)
			get_file_rc = rc;

		/* reserve send credits before gathering pages */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, credits);
		if (rc != 0) {
			done = true;
			break;
		}

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			done = true;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		/* hand the credits and the handle reference to the wdata */
		wdata->credits = credits_on_stack;
		wdata->cfile = cfile;
		cfile = NULL;

		if (!wdata->cfile) {
			cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
				 get_file_rc);
			if (is_retryable_error(get_file_rc))
				rc = get_file_rc;
			else
				rc = -EBADF;
		} else
			rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (is_retryable_error(rc))
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (!is_retryable_error(rc))
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* data-integrity writeback must retry -EAGAIN batches */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		/* Return immediately if we received a signal during writing */
		if (is_interrupt_error(rc)) {
			done = true;
			break;
		}

		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (saved_rc != 0)
		rc = saved_rc;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	if (cfile)
		cifsFileInfo_put(cfile);
	free_xid(xid);
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326
/*
 * Write out a single locked page.  The caller holds the page lock and is
 * responsible for unlocking it.  Retryable errors loop (for WB_SYNC_ALL) or
 * redirty the page; hard errors mark the page and mapping with the error.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		/* for integrity syncs keep retrying -EAGAIN in place;
		   otherwise redirty and let a later pass try again */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2367
/*
 * ->writepage() address_space operation: write out one locked page and
 * unlock it, delegating the actual work to cifs_writepage_locked().
 */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = cifs_writepage_locked(page, wbc);
	unlock_page(page);

	return retval;
}
2374
/*
 * ->write_end() address_space operation: commit @copied bytes written into
 * @page at @pos.  Up-to-date pages are simply dirtied for later writeback;
 * pages that are not up to date are written through synchronously via
 * cifs_write() (we cannot dirty a partially-valid page).  Updates i_size if
 * the write extended the file, then unlocks and releases the page.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* with rwpidforward, writes carry the pid of the opener */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	/* PG_checked was set by write_begin; a full-length copy makes the
	   page fully valid */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2435
/*
 * fsync for strict cache mode: flush dirty pages in [start, end], drop the
 * pagecache if we no longer hold a read (caching) lease, then ask the server
 * to flush its copy (unless the nostrictsync mount option is set).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	/* without a read lease the cache may be stale: invalidate it */
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	return rc;
}
2476
/*
 * Non-strict fsync: flush dirty pages in [start, end] and ask the server to
 * flush its copy (unless the nostrictsync mount option is set).  Unlike
 * cifs_strict_fsync(), the local pagecache is never invalidated here.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	return rc;
}
2507
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508/*
2509 * As file closes, flush all cached write data for this inode checking
2510 * for write behind errors.
2511 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002512int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513{
Al Viro496ad9a2013-01-23 17:07:38 -05002514 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 int rc = 0;
2516
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002517 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002518 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002519
Joe Perchesf96637b2013-05-04 22:12:25 -05002520 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521
2522 return rc;
2523}
2524
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002525static int
2526cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2527{
2528 int rc = 0;
2529 unsigned long i;
2530
2531 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002532 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002533 if (!pages[i]) {
2534 /*
2535 * save number of pages we have already allocated and
2536 * return with ENOMEM error
2537 */
2538 num_pages = i;
2539 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002540 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002541 }
2542 }
2543
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002544 if (rc) {
2545 for (i = 0; i < num_pages; i++)
2546 put_page(pages[i]);
2547 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002548 return rc;
2549}
2550
2551static inline
2552size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2553{
2554 size_t num_pages;
2555 size_t clen;
2556
2557 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002558 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002559
2560 if (cur_len)
2561 *cur_len = clen;
2562
2563 return num_pages;
2564}
2565
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002566static void
Steve French4a5c80d2014-02-07 20:45:12 -06002567cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002568{
2569 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002570 struct cifs_writedata *wdata = container_of(refcount,
2571 struct cifs_writedata, refcount);
2572
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002573 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
Steve French4a5c80d2014-02-07 20:45:12 -06002574 for (i = 0; i < wdata->nr_pages; i++)
2575 put_page(wdata->pages[i]);
2576 cifs_writedata_release(refcount);
2577}
2578
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002579static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2580
Steve French4a5c80d2014-02-07 20:45:12 -06002581static void
2582cifs_uncached_writev_complete(struct work_struct *work)
2583{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002584 struct cifs_writedata *wdata = container_of(work,
2585 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002586 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002587 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2588
2589 spin_lock(&inode->i_lock);
2590 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2591 if (cifsi->server_eof > inode->i_size)
2592 i_size_write(inode, cifsi->server_eof);
2593 spin_unlock(&inode->i_lock);
2594
2595 complete(&wdata->done);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002596 collect_uncached_write_data(wdata->ctx);
2597 /* the below call can possibly free the last ref to aio ctx */
Steve French4a5c80d2014-02-07 20:45:12 -06002598 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002599}
2600
/*
 * Copy up to *len bytes from the user iterator @from into the wdata's
 * pages. On return, *len is the number of bytes actually copied and
 * *num_pages the number of pages used. A short copy (e.g. the iovec
 * pointed into an unmapped area) stops early; copying nothing at all is
 * reported as -EFAULT.
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2642
/*
 * Resend a previously built wdata after a retryable failure: reopen the
 * file handle if it went stale, wait until enough credits are available
 * to cover the whole payload in one request, then reissue the async
 * write. On success the wdata is queued on @wdata_list; on any final
 * failure the caller's reference to @wdata is dropped here.
 * (@ctx is currently unused in this path.)
 */
static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
	struct cifs_aio_ctx *ctx)
{
	unsigned int wsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}


		/*
		 * Wait for credits to resend this wdata.
		 * Note: we are attempting to resend the whole wdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, wdata->bytes,
						&wsize, &credits);
			if (rc)
				goto fail;

			/* not enough credits yet: give them back and retry */
			if (wsize < wdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (wsize < wdata->bytes);
		wdata->credits = credits;

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			/* re-check: the handle may have gone stale again */
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		/* If the write was successfully sent, we are done */
		if (!rc) {
			list_add_tail(&wdata->list, wdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &wdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	return rc;
}
2705
/*
 * Split an uncached (or direct-I/O) write into wsize-bounded chunks,
 * build a cifs_writedata for each chunk and issue it asynchronously.
 * For direct I/O the caller's user pages are pinned via
 * iov_iter_get_pages_alloc(); otherwise data is copied into freshly
 * allocated pages. Each successfully issued wdata is queued on
 * @wdata_list for later reaping by collect_uncached_write_data().
 * Returns 0 if everything was queued, otherwise the first fatal error.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	unsigned int xid;

	/* forward the opener's pid on the wire if the mount asks for it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;
	xid = get_xid();

	do {
		unsigned int wsize;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;

		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/* block until we have credits for up to wsize bytes */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, wsize);

		if (ctx->direct_io) {
			ssize_t result;

			/* pin the user's pages; no data copy in this path */
			result = iov_iter_get_pages_alloc(
				from, &pagevec, cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					"direct_writev couldn't get user pages "
					"(rc=%zd) iter type %d iov_offset %zd "
					"count %zd\n",
					result, from->type,
					from->iov_offset, from->count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(from, cur_len);

			nr_pages =
				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;

			wdata = cifs_writedata_direct_alloc(pagevec,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}


			/* data may start mid-page; compute the tail length */
			wdata->page_offset = start;
			wdata->tailsz =
				nr_pages > 1 ?
					cur_len - (PAGE_SIZE - start) -
					(nr_pages - 2) * PAGE_SIZE :
					cur_len;
		} else {
			nr_pages = get_numpages(wsize, len, &cur_len);
			wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
			if (rc) {
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			num_pages = nr_pages;
			/* copy the user data into our freshly allocated pages */
			rc = wdata_fill_from_iovec(
				wdata, from, &cur_len, &num_pages);
			if (rc) {
				for (i = 0; i < nr_pages; i++)
					put_page(wdata->pages[i]);
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			/*
			 * Bring nr_pages down to the number of pages we
			 * actually used, and free any pages that we didn't use.
			 */
			for ( ; nr_pages > num_pages; nr_pages--)
				put_page(wdata->pages[nr_pages - 1]);

			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->credits = credits_on_stack;
		/* each in-flight wdata holds a reference on the aio ctx */
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		if (rc) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/* rewind the iterator to this chunk's start */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	free_xid(xid);
	return rc;
}
2872
/*
 * Reap completed uncached write chunks for @ctx: accumulate the total
 * bytes written, resend chunks that failed with -EAGAIN, and once every
 * chunk is accounted for, publish the result in ctx->rc and either
 * complete the async iocb or wake a synchronous waiter. Called from the
 * per-wdata completion handler; bails out (to be re-invoked later) if
 * some chunk has not completed yet.
 */
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	/* an empty list means another invocation already finished the job */
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			/* not done yet: leave and let its completion re-enter */
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				if (ctx->direct_io)
					rc = cifs_resend_wdata(
						wdata, &tmp_list, ctx);
				else {
					/* re-copy from the saved iterator */
					iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

					rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);

					kref_put(&wdata->refcount,
						cifs_uncached_writedata_release);
				}

				/* resent chunks rejoin the list; rescan it */
				list_splice(&tmp_list, &ctx->list);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	cifs_stats_bytes_written(tcon, ctx->total_len);
	/* cached pages are now stale relative to the server copy */
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
2954
/*
 * Common implementation behind cifs_user_writev() and
 * cifs_direct_writev(): validate the request, build a cifs_aio_ctx
 * describing the data, fan the write out via cifs_write_from_iter(),
 * and for synchronous iocbs wait for all chunks to finish. Returns the
 * number of bytes written, -EIOCBQUEUED for queued async requests, or a
 * negative error.
 */
static ssize_t __cifs_writev(
	struct kiocb *iocb, struct iov_iter *from, bool direct)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	size_t len = iov_iter_count(from);
	int rc;

	/*
	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
	 * In this case, fall back to non-direct write function.
	 * this could be improved by getting pages directly in ITER_KVEC
	 */
	if (direct && from->type & ITER_KVEC) {
		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
		direct = false;
	}

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	/* only remember the iocb if completion must be delivered async */
	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	if (direct) {
		ctx->direct_io = true;
		ctx->iter = *from;
		ctx->len = len;
	} else {
		/* snapshot the iovec into ctx so it outlives the caller */
		rc = setup_aio_ctx_iter(ctx, from, WRITE);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	/* async: completion handlers finish the job; tell the VFS so */
	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* killed while waiting: report EINTR but keep partial count */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}
3058
/* O_DIRECT write entry point: write straight from the user's pages. */
ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, true);
}
3063
/* Uncached write entry point: copy user data, then send asynchronously. */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}
3068
/*
 * Cached write used when we hold a write oplock but mandatory byte-range
 * locks may exist: take the inode lock, then lock_sem (read) so no brlock
 * can be added underneath us, verify the target range does not conflict
 * with an exclusive lock, and only then go through the generic pagecache
 * write path. Syncs afterwards if the file is O_SYNC/O_DSYNC.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	/* honor O_SYNC/O_DSYNC semantics after a successful write */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
3104
/*
 * strictcache write entry point. Dispatch depends on the oplock/lease we
 * hold: with a write oplock we may use the pagecache (directly for POSIX
 * mounts with fcntl capability, otherwise via cifs_writev() which checks
 * mandatory brlocks); without one we must write uncached, and if we still
 * hold read caching the now-stale pagecache is zapped afterwards.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* blocks while an oplock break is being handled; may return error */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
3154
Jeff Layton0471ca32012-05-16 07:13:16 -04003155static struct cifs_readdata *
Long Lif9f5aca2018-05-30 12:47:54 -07003156cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04003157{
3158 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003159
Long Lif9f5aca2018-05-30 12:47:54 -07003160 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04003161 if (rdata != NULL) {
Long Lif9f5aca2018-05-30 12:47:54 -07003162 rdata->pages = pages;
Jeff Layton6993f742012-05-16 07:13:17 -04003163 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003164 INIT_LIST_HEAD(&rdata->list);
3165 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04003166 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04003167 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003168
Jeff Layton0471ca32012-05-16 07:13:16 -04003169 return rdata;
3170}
3171
Long Lif9f5aca2018-05-30 12:47:54 -07003172static struct cifs_readdata *
3173cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3174{
3175 struct page **pages =
Kees Cook6396bb22018-06-12 14:03:40 -07003176 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
Long Lif9f5aca2018-05-30 12:47:54 -07003177 struct cifs_readdata *ret = NULL;
3178
3179 if (pages) {
3180 ret = cifs_readdata_direct_alloc(pages, complete);
3181 if (!ret)
3182 kfree(pages);
3183 }
3184
3185 return ret;
3186}
3187
/*
 * Final-release callback for a readdata's kref: tear down the SMB Direct
 * memory registration if one exists, drop the file reference, and free
 * the page-pointer array and the rdata itself.
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kvfree(rdata->pages);
	kfree(rdata);
}
3205
Jeff Layton2a1bb132012-05-16 07:13:17 -04003206static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003207cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04003208{
3209 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003210 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04003211 unsigned int i;
3212
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003213 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04003214 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3215 if (!page) {
3216 rc = -ENOMEM;
3217 break;
3218 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003219 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04003220 }
3221
3222 if (rc) {
Roberto Bergantinos Corpas31fad7d2019-05-28 09:38:14 +02003223 unsigned int nr_page_failed = i;
3224
3225 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003226 put_page(rdata->pages[i]);
3227 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04003228 }
3229 }
3230 return rc;
3231}
3232
3233static void
3234cifs_uncached_readdata_release(struct kref *refcount)
3235{
Jeff Layton1c892542012-05-16 07:13:17 -04003236 struct cifs_readdata *rdata = container_of(refcount,
3237 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003238 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003239
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003240 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003241 for (i = 0; i < rdata->nr_pages; i++) {
3242 put_page(rdata->pages[i]);
Jeff Layton1c892542012-05-16 07:13:17 -04003243 }
3244 cifs_readdata_release(refcount);
3245}
3246
Jeff Layton1c892542012-05-16 07:13:17 -04003247/**
3248 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3249 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05003250 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04003251 *
3252 * This function copies data from a list of pages in a readdata response into
3253 * an array of iovecs. It will first calculate where the data should go
3254 * based on the info in the readdata and then copy the data into that spot.
3255 */
Al Viro7f25bba2014-02-04 14:07:43 -05003256static int
3257cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04003258{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04003259 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003260 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003261
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003262 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003263 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02003264 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003265 size_t written;
3266
David Howells00e23702018-10-22 13:07:28 +01003267 if (unlikely(iov_iter_is_pipe(iter))) {
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003268 void *addr = kmap_atomic(page);
3269
3270 written = copy_to_iter(addr, copy, iter);
3271 kunmap_atomic(addr);
3272 } else
3273 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05003274 remaining -= written;
3275 if (written < copy && iov_iter_count(iter) > 0)
3276 break;
Jeff Layton1c892542012-05-16 07:13:17 -04003277 }
Al Viro7f25bba2014-02-04 14:07:43 -05003278 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04003279}
3280
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003281static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3282
Jeff Layton1c892542012-05-16 07:13:17 -04003283static void
3284cifs_uncached_readv_complete(struct work_struct *work)
3285{
3286 struct cifs_readdata *rdata = container_of(work,
3287 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04003288
3289 complete(&rdata->done);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003290 collect_uncached_read_data(rdata->ctx);
3291 /* the below call can possibly free the last ref to aio ctx */
Jeff Layton1c892542012-05-16 07:13:17 -04003292 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3293}
3294
/*
 * Fill the pages of an uncached read request with up to @len bytes of
 * payload, either copied from @iter (when the data already arrived in a
 * transform/decrypt buffer) or read directly off the server socket when
 * @iter is NULL.  Pages beyond the received length are released and
 * rdata->nr_pages shrunk accordingly; rdata->tailsz records the number of
 * valid bytes in the final page.
 *
 * Returns the number of bytes placed into the pages if any were received
 * (unless the connection aborted), otherwise the error from the transport.
 */
static int
uncached_fill_pages(struct TCP_Server_Info *server,
		    struct cifs_readdata *rdata, struct iov_iter *iter,
		    unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;
		unsigned int segment_size = rdata->pagesz;

		/* only the first page starts at a non-zero offset */
		if (i == 0)
			segment_size -= page_offset;
		else
			page_offset = 0;

		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		n = len;
		if (len >= segment_size)
			/* enough data to fill the page */
			n = segment_size;
		else
			rdata->tailsz = len;
		len -= n;

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			/* RDMA already placed the data; just account for it */
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3353
/* Fill an uncached read's pages straight from the server socket. */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	return uncached_fill_pages(server, rdata, NULL, len);
}
3360
/* Fill an uncached read's pages by copying from an already-received iter. */
static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata,
			      struct iov_iter *iter)
{
	return uncached_fill_pages(server, rdata, iter, iter->count);
}
3368
/*
 * Resend a previously-issued read request (used on reconnect for direct
 * I/O, where the rdata and its pages are reused as-is).  Waits until the
 * server grants enough credits to cover the whole request, then reissues
 * it.  On success the rdata is queued on @rdata_list and 0 is returned;
 * on any non-retryable failure the rdata's reference is dropped and the
 * error returned.
 */
static int cifs_resend_rdata(struct cifs_readdata *rdata,
			struct list_head *rdata_list,
			struct cifs_aio_ctx *ctx)
{
	unsigned int rsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		/* the handle may have gone stale across the reconnect */
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/*
		 * Wait for credits to resend this rdata.
		 * Note: we are attempting to resend the whole rdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, rdata->bytes,
						&rsize, &credits);

			if (rc)
				goto fail;

			/* not enough yet: give the credits back and retry */
			if (rsize < rdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (rsize < rdata->bytes);
		rdata->credits = credits;

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		/* If the read was successfully sent, we are done */
		if (!rc) {
			/* Add to aio pending list */
			list_add_tail(&rdata->list, rdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &rdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	return rc;
}
3430
/*
 * Split an uncached read of @len bytes at @offset into credit-sized
 * chunks and issue each as an async read request, queueing every
 * successfully sent rdata on @rdata_list.  For direct I/O the user's
 * pages are pinned via iov_iter_get_pages_alloc(); otherwise kernel
 * bounce pages are allocated per chunk.  Each rdata takes a reference
 * on @ctx which is dropped in cifs_uncached_readdata_release().
 *
 * Returns 0 if everything was sent, otherwise the first fatal error
 * (requests already queued remain on @rdata_list).
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize;
	struct cifs_credits credits_on_stack;
	struct cifs_credits *credits = &credits_on_stack;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	struct iov_iter direct_iov = ctx->iter;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	/* skip the part of the iter that earlier calls already consumed */
	if (ctx->direct_io)
		iov_iter_advance(&direct_iov, offset - ctx->pos);

	do {
		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/* each chunk is bounded by the credits the server grants */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);

		if (ctx->direct_io) {
			ssize_t result;

			/* pin the user's pages for zero-copy I/O */
			result = iov_iter_get_pages_alloc(
					&direct_iov, &pagevec,
					cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					"couldn't get user pages (rc=%zd)"
					" iter type %d"
					" iov_offset %zd count %zd\n",
					result, direct_iov.type,
					direct_iov.iov_offset,
					direct_iov.count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(&direct_iov, cur_len);

			rdata = cifs_readdata_direct_alloc(
					pagevec, cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
			rdata->page_offset = start;
			/* bytes valid in the last pinned page */
			rdata->tailsz = npages > 1 ?
				cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
				cur_len;

		} else {

			npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
			/* allocate a readdata struct */
			rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			rc = cifs_read_allocate_pages(rdata, npages);
			if (rc) {
				kvfree(rdata->pages);
				kfree(rdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rdata->tailsz = PAGE_SIZE;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
		rdata->credits = credits_on_stack;
		rdata->ctx = ctx;
		/* rdata holds a ctx ref until its final release */
		kref_get(&ctx->refcount);

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);

		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		if (rc) {
			add_credits_and_wake_if(server, &rdata->credits, 0);
			kref_put(&rdata->refcount,
				cifs_uncached_readdata_release);
			if (rc == -EAGAIN) {
				/* undo the advance so the chunk is retried */
				iov_iter_revert(&direct_iov, cur_len);
				continue;
			}
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3573
/*
 * Gather the results of all outstanding read requests attached to @ctx.
 * Called from each request's completion work as well as from the
 * submitting thread; ctx->aio_mutex serializes the collectors.  Returns
 * early (without completing the ctx) if any request has not finished yet.
 * Requests that failed with -EAGAIN are resent; for non-direct I/O the
 * received pages are copied out to ctx->iter.  When everything is
 * accounted for, ctx->rc is set and the iocb completed (async) or
 * ctx->done signalled (sync).
 */
static void
collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata, *tmp;
	struct iov_iter *to = &ctx->iter;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	/* another collector already finished the job */
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
		if (!rc) {
			/* a request still in flight: bail, its completion
			   handler will call back into this function */
			if (!try_wait_for_completion(&rdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = 0;
					if (!ctx->direct_io)
						rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
							cifs_uncached_readdata_release);
						continue;
					}
				}

				if (ctx->direct_io) {
					/*
					 * Re-use rdata as this is a
					 * direct I/O
					 */
					rc = cifs_resend_rdata(
						rdata,
						&tmp_list, ctx);
				} else {
					rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list, ctx);

					kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
				}

				list_splice(&tmp_list, &ctx->list);

				/* list changed; restart the scan */
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else if (!ctx->direct_io)
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;

			ctx->total_len += rdata->got_bytes;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	if (!ctx->direct_io)
		ctx->total_len = ctx->len - iov_iter_count(to);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
3680
/*
 * Common implementation behind cifs_user_readv() and cifs_direct_readv().
 * Builds a cifs_aio_ctx, splits the read into async requests via
 * cifs_send_async_read(), and either returns -EIOCBQUEUED (async iocb)
 * or waits for all requests to finish and returns the byte count/error.
 * @direct selects zero-copy I/O on the caller's own pages.
 */
static ssize_t __cifs_readv(
	struct kiocb *iocb, struct iov_iter *to, bool direct)
{
	size_t len;
	struct file *file = iocb->ki_filp;
	struct cifs_sb_info *cifs_sb;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	ssize_t rc, total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_aio_ctx *ctx;

	/*
	 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
	 * fall back to data copy read path
	 * this could be improved by getting pages directly in ITER_KVEC
	 */
	if (direct && to->type & ITER_KVEC) {
		cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
		direct = false;
	}

	len = iov_iter_count(to);
	if (!len)
		return 0;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	/* user-backed pages must be dirtied after we write into them */
	if (iter_is_iovec(to))
		ctx->should_dirty = true;

	if (direct) {
		ctx->pos = offset;
		ctx->direct_io = true;
		ctx->iter = *to;
		ctx->len = len;
	} else {
		rc = setup_aio_ctx_iter(ctx, to, READ);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
		len = ctx->len;
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	/* async: completion path will finish the iocb */
	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* killed while waiting: report what we got so far */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_read = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_read = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3783
/* Read path for O_DIRECT opens: zero-copy into the caller's pages. */
ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
{
	return __cifs_readv(iocb, to, true);
}
3788
/* Uncached read path: server data is bounced through kernel pages. */
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	return __cifs_readv(iocb, to, false);
}
3793
/*
 * Read entry point for "strict" cache mode: use the page cache only when
 * we hold a read-caching oplock/lease and no conflicting byte-range lock
 * covers the requested region; otherwise fall back to an uncached read.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	/* POSIX-capable unix extensions: the server handles lock semantics */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     0, NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833
/*
 * Legacy synchronous read: copy up to @read_size bytes starting at
 * *@offset into @read_data using the server's sync_read op, looping in
 * rsize-bounded chunks and retrying each chunk on -EAGAIN (reopening a
 * stale handle as needed).  Advances *@offset by the bytes read.
 * Returns the total byte count, or a negative error if nothing was read.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			/* error or EOF: report partial progress if any */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3924
Jeff Laytonca83ce32011-04-12 09:13:44 -04003925/*
3926 * If the page is mmap'ed into a process' page tables, then we need to make
3927 * sure that it doesn't change while being written back.
3928 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05303929static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08003930cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003931{
3932 struct page *page = vmf->page;
3933
3934 lock_page(page);
3935 return VM_FAULT_LOCKED;
3936}
3937
/* VM operations for cifs mmap'ed files; write faults lock the page. */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3943
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003944int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3945{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003946 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003947 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003948
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003949 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003950
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003951 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003952 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003953 if (!rc)
3954 rc = generic_file_mmap(file, vma);
3955 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003956 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003957
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003958 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003959 return rc;
3960}
3961
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3963{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964 int rc, xid;
3965
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003966 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003967
Jeff Laytonabab0952010-02-12 07:44:18 -05003968 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003969 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003970 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3971 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003972 if (!rc)
3973 rc = generic_file_mmap(file, vma);
3974 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003975 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003976
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003977 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 return rc;
3979}
3980
Jeff Layton0471ca32012-05-16 07:13:16 -04003981static void
3982cifs_readv_complete(struct work_struct *work)
3983{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003984 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003985 struct cifs_readdata *rdata = container_of(work,
3986 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003987
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003988 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003989 for (i = 0; i < rdata->nr_pages; i++) {
3990 struct page *page = rdata->pages[i];
3991
Jeff Layton0471ca32012-05-16 07:13:16 -04003992 lru_cache_add_file(page);
3993
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003994 if (rdata->result == 0 ||
3995 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003996 flush_dcache_page(page);
3997 SetPageUptodate(page);
3998 }
3999
4000 unlock_page(page);
4001
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004002 if (rdata->result == 0 ||
4003 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04004004 cifs_readpage_to_fscache(rdata->mapping->host, page);
4005
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004006 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004007
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004008 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004009 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04004010 }
Jeff Layton6993f742012-05-16 07:13:17 -04004011 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04004012}
4013
/*
 * Fill the pages attached to @rdata with up to @len bytes of read data,
 * either copied from @iter (when non-NULL) or read from the server socket.
 *
 * Pages past the data are handled specially: pages beyond the server's
 * (probable) EOF are zero-filled and marked uptodate; pages between the
 * data and EOF are simply released back to the VM. Released pages are
 * removed from rdata->pages (set to NULL, nr_pages decremented).
 *
 * Returns the number of bytes placed into pages (rdata->got_bytes) on
 * at least partial success, otherwise the negative error from the
 * copy/receive step.
 */
static int
readpages_fill_pages(struct TCP_Server_Info *server,
		     struct cifs_readdata *rdata, struct iov_iter *iter,
		     unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		unsigned int to_read = rdata->pagesz;
		size_t n;

		/* only the first page can start at a non-zero offset */
		if (i == 0)
			to_read -= page_offset;
		else
			page_offset = 0;

		n = to_read;

		if (len >= to_read) {
			/* full page of data available */
			len -= to_read;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len + page_offset, to_read - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		/* copy from the supplied iterator, or pull off the socket */
		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			/* data already placed by RDMA; just account for it */
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* report partial progress unless the connection was aborted */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
		rdata->got_bytes : result;
}
4099
/* readpages variant that receives data directly from the server socket. */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	return readpages_fill_pages(server, rdata, NULL, len);
}
4106
/* readpages variant that copies already-received data out of @iter. */
static int
cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata,
			       struct iov_iter *iter)
{
	return readpages_fill_pages(server, rdata, iter, iter->count);
}
4114
/*
 * Pull a run of contiguous pages off @page_list and insert them into the
 * pagecache, moving them to @tmplist for a single read request.
 *
 * Starts with the last page on the list and keeps taking pages while their
 * indexes are consecutive and the accumulated size stays within @rsize.
 * On success *offset/*bytes/*nr_pages describe the resulting request.
 * Returns 0, or a negative error if the first page could not be added to
 * the pagecache (later failures just terminate the run).
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = lru_to_page(page_list);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
4173
/*
 * ->readpages address_space operation: batch pages from @page_list into
 * rsize-bounded async read requests. Pages satisfied from fscache are
 * handled up front; the rest are grouped by readpages_get_pages() and
 * submitted via the server's async_readv op, with completion handled in
 * cifs_readv_complete(). Credits are obtained per request and returned on
 * any failure path.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;
	unsigned int xid;

	xid = get_xid();
	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0) {
		free_xid(xid);
		return rc;
	}

	/* pid forwarded to the server when the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;

		/* reopen a stale handle before asking for credits */
		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			free_xid(xid);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->tailsz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
		rdata->credits = credits_on_stack;

		/* tmplist is in increasing index order; keep that order */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);

		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		if (rc) {
			/* undo: return credits and release every page */
			add_credits_and_wake_if(server, &rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our ref; completion work holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	free_xid(xid);
	return rc;
}
4325
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01004326/*
4327 * cifs_readpage_worker must be called with the page pinned
4328 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329static int cifs_readpage_worker(struct file *file, struct page *page,
4330 loff_t *poffset)
4331{
4332 char *read_data;
4333 int rc;
4334
Suresh Jayaraman566982362010-07-05 18:13:25 +05304335 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05004336 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304337 if (rc == 0)
4338 goto read_complete;
4339
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 read_data = kmap(page);
4341 /* for reads over a certain size could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004342
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004343 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004344
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345 if (rc < 0)
4346 goto io_error;
4347 else
Joe Perchesf96637b2013-05-04 22:12:25 -05004348 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004349
Steve French9b9c5be2018-09-22 12:07:06 -05004350 /* we do not want atime to be less than mtime, it broke some apps */
4351 file_inode(file)->i_atime = current_time(file_inode(file));
4352 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4353 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4354 else
4355 file_inode(file)->i_atime = current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004356
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004357 if (PAGE_SIZE > rc)
4358 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359
4360 flush_dcache_page(page);
4361 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304362
4363 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05004364 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304365
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004367
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004369 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004370 unlock_page(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304371
4372read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373 return rc;
4374}
4375
4376static int cifs_readpage(struct file *file, struct page *page)
4377{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004378 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004379 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004380 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004382 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383
4384 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304385 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004386 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304387 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388 }
4389
Joe Perchesf96637b2013-05-04 22:12:25 -05004390 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00004391 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004392
4393 rc = cifs_readpage_worker(file, page, &offset);
4394
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004395 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396 return rc;
4397}
4398
Steve Frencha403a0a2007-07-26 15:54:16 +00004399static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4400{
4401 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05004402 struct cifs_tcon *tcon =
4403 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00004404
Steve French3afca262016-09-22 18:58:16 -05004405 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004406 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004407 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05004408 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004409 return 1;
4410 }
4411 }
Steve French3afca262016-09-22 18:58:16 -05004412 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004413 return 0;
4414}
4415
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416/* We do not want to update the file size from server for inodes
4417 open for write - to avoid races with writepage extending
4418 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004419 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420 but this is tricky to do without racing with writebehind
4421 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004422bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004423{
Steve Frencha403a0a2007-07-26 15:54:16 +00004424 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004425 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004426
Steve Frencha403a0a2007-07-26 15:54:16 +00004427 if (is_inode_writable(cifsInode)) {
4428 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004429 struct cifs_sb_info *cifs_sb;
4430
Steve Frenchc32a0b62006-01-12 14:41:28 -08004431 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004432 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004433 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004434 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004435 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004436 }
4437
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004438 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004439 return true;
Steve French7ba526312007-02-08 18:14:13 +00004440
Steve French4b18f2a2008-04-29 00:06:05 +00004441 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004442 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004443 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444}
4445
/*
 * ->write_begin handler: grab (or create) the pagecache page that will
 * receive the write and decide whether it must be read in first.
 *
 * The page is returned locked in *pagep with rc == 0 even when it is not
 * uptodate - in that case cifs_write_end falls back to a sync write. The
 * read-in is attempted at most once (oncethru) via the goto-start retry.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
4522
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304523static int cifs_release_page(struct page *page, gfp_t gfp)
4524{
4525 if (PagePrivate(page))
4526 return 0;
4527
4528 return cifs_fscache_release_page(page, gfp);
4529}
4530
/*
 * ->invalidatepage handler. Only a whole-page invalidation drops the
 * fscache copy; a partial one leaves it in place.
 */
static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}
4539
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004540static int cifs_launder_page(struct page *page)
4541{
4542 int rc = 0;
4543 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004544 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004545 struct writeback_control wbc = {
4546 .sync_mode = WB_SYNC_ALL,
4547 .nr_to_write = 0,
4548 .range_start = range_start,
4549 .range_end = range_end,
4550 };
4551
Joe Perchesf96637b2013-05-04 22:12:25 -05004552 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004553
4554 if (clear_page_dirty_for_io(page))
4555 rc = cifs_writepage_locked(page, &wbc);
4556
4557 cifs_fscache_invalidate_page(page, page->mapping->host);
4558 return rc;
4559}
4560
/*
 * Work handler that processes a server-initiated oplock break for one
 * open file.  The ordering below is significant: wait out in-flight
 * writers, downgrade the cached oplock state, break any VFS leases,
 * flush (and possibly zap) the page cache, re-push byte-range locks,
 * and finally acknowledge the break to the server.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* Do not downgrade the oplock while writers are still in flight */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	/* Dialect-specific downgrade: to level II if the server allowed it */
	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/*
	 * With mandatory byte-range locks held we cannot safely keep even a
	 * read (level II) cache, so drop the oplock to none entirely.
	 */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
					cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* Break any leases handed out to local lease holders */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		/*
		 * If we can no longer cache reads, the pagecache contents are
		 * no longer trustworthy: wait for writeback and zap them.
		 */
		if (!CIFS_CACHE_READ(cinode)) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	/* Re-send cached byte-range locks now that caching state changed */
	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	/* Drop the reference taken when this work item was queued */
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */);
	cifs_done_oplock_break(cinode);
}
4616
Steve Frenchdca69282013-11-11 16:42:37 -06004617/*
4618 * The presence of cifs_direct_io() in the address space ops vector
4619 * allowes open() O_DIRECT flags which would have failed otherwise.
4620 *
4621 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4622 * so this method should never be called.
4623 *
4624 * Direct IO is not yet supported in the cached mode.
4625 */
4626static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004627cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004628{
4629 /*
4630 * FIXME
4631 * Eventually need to support direct IO for non forcedirectio mounts
4632 */
4633 return -EINVAL;
4634}
4635
4636
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004637const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638 .readpage = cifs_readpage,
4639 .readpages = cifs_readpages,
4640 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07004641 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004642 .write_begin = cifs_write_begin,
4643 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304645 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06004646 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304647 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004648 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004650
4651/*
4652 * cifs_readpages requires the server to support a buffer large enough to
4653 * contain the header plus one complete page of data. Otherwise, we need
4654 * to leave cifs_readpages out of the address space operations.
4655 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004656const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004657 .readpage = cifs_readpage,
4658 .writepage = cifs_writepage,
4659 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004660 .write_begin = cifs_write_begin,
4661 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004662 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304663 .releasepage = cifs_release_page,
4664 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004665 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004666};