blob: 2a6d20c0ce0288d37ad2405424200be3fefaa9f4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Nikolay Borisovf86196e2019-01-03 15:29:02 -080036#include <linux/mm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <asm/div64.h>
38#include "cifsfs.h"
39#include "cifspdu.h"
40#include "cifsglob.h"
41#include "cifsproto.h"
42#include "cifs_unicode.h"
43#include "cifs_debug.h"
44#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053045#include "fscache.h"
Long Libd3dcc62017-11-22 17:38:47 -070046#include "smbdirect.h"
Steve French07b92d02013-02-18 10:34:26 -060047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048static inline int cifs_convert_flags(unsigned int flags)
49{
50 if ((flags & O_ACCMODE) == O_RDONLY)
51 return GENERIC_READ;
52 else if ((flags & O_ACCMODE) == O_WRONLY)
53 return GENERIC_WRITE;
54 else if ((flags & O_ACCMODE) == O_RDWR) {
55 /* GENERIC_ALL is too much permission to request
56 can cause unnecessary access denied on create */
57 /* return GENERIC_ALL; */
58 return (GENERIC_READ | GENERIC_WRITE);
59 }
60
Jeff Laytone10f7b52008-05-14 10:21:33 -070061 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
62 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
63 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000064}
Jeff Laytone10f7b52008-05-14 10:21:33 -070065
Jeff Layton608712f2010-10-15 15:33:56 -040066static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000067{
Jeff Layton608712f2010-10-15 15:33:56 -040068 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070069
Steve French7fc8f4e2009-02-23 20:43:11 +000070 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040071 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000072 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040073 posix_flags = SMB_O_WRONLY;
74 else if ((flags & O_ACCMODE) == O_RDWR)
75 posix_flags = SMB_O_RDWR;
76
Steve French07b92d02013-02-18 10:34:26 -060077 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040078 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060079 if (flags & O_EXCL)
80 posix_flags |= SMB_O_EXCL;
81 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050082 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
83 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060084
Jeff Layton608712f2010-10-15 15:33:56 -040085 if (flags & O_TRUNC)
86 posix_flags |= SMB_O_TRUNC;
87 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010088 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040089 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000090 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040091 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000092 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040093 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000094 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040095 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000096
97 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070098}
99
100static inline int cifs_get_disposition(unsigned int flags)
101{
102 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
103 return FILE_CREATE;
104 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
105 return FILE_OVERWRITE_IF;
106 else if ((flags & O_CREAT) == O_CREAT)
107 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000108 else if ((flags & O_TRUNC) == O_TRUNC)
109 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110 else
111 return FILE_OPEN;
112}
113
Jeff Layton608712f2010-10-15 15:33:56 -0400114int cifs_posix_open(char *full_path, struct inode **pinode,
115 struct super_block *sb, int mode, unsigned int f_flags,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400116 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
Jeff Layton608712f2010-10-15 15:33:56 -0400117{
118 int rc;
119 FILE_UNIX_BASIC_INFO *presp_data;
120 __u32 posix_flags = 0;
121 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
122 struct cifs_fattr fattr;
123 struct tcon_link *tlink;
Steve French96daf2b2011-05-27 04:34:02 +0000124 struct cifs_tcon *tcon;
Jeff Layton608712f2010-10-15 15:33:56 -0400125
Joe Perchesf96637b2013-05-04 22:12:25 -0500126 cifs_dbg(FYI, "posix open %s\n", full_path);
Jeff Layton608712f2010-10-15 15:33:56 -0400127
128 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
129 if (presp_data == NULL)
130 return -ENOMEM;
131
132 tlink = cifs_sb_tlink(cifs_sb);
133 if (IS_ERR(tlink)) {
134 rc = PTR_ERR(tlink);
135 goto posix_open_ret;
136 }
137
138 tcon = tlink_tcon(tlink);
139 mode &= ~current_umask();
140
141 posix_flags = cifs_posix_convert_flags(f_flags);
142 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
143 poplock, full_path, cifs_sb->local_nls,
Nakajima Akirabc8ebdc42015-02-13 15:35:58 +0900144 cifs_remap(cifs_sb));
Jeff Layton608712f2010-10-15 15:33:56 -0400145 cifs_put_tlink(tlink);
146
147 if (rc)
148 goto posix_open_ret;
149
150 if (presp_data->Type == cpu_to_le32(-1))
151 goto posix_open_ret; /* open ok, caller does qpathinfo */
152
153 if (!pinode)
154 goto posix_open_ret; /* caller does not need info */
155
156 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
157
158 /* get new inode and set it up */
159 if (*pinode == NULL) {
160 cifs_fill_uniqueid(sb, &fattr);
161 *pinode = cifs_iget(sb, &fattr);
162 if (!*pinode) {
163 rc = -ENOMEM;
164 goto posix_open_ret;
165 }
166 } else {
167 cifs_fattr_to_inode(*pinode, &fattr);
168 }
169
170posix_open_ret:
171 kfree(presp_data);
172 return rc;
173}
174
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300175static int
176cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700177 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
178 struct cifs_fid *fid, unsigned int xid)
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300179{
180 int rc;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700181 int desired_access;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300182 int disposition;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500183 int create_options = CREATE_NOT_DIR;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300184 FILE_ALL_INFO *buf;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700185 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400186 struct cifs_open_parms oparms;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300187
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700188 if (!server->ops->open)
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700189 return -ENOSYS;
190
191 desired_access = cifs_convert_flags(f_flags);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300192
193/*********************************************************************
194 * open flag mapping table:
195 *
196 * POSIX Flag CIFS Disposition
197 * ---------- ----------------
198 * O_CREAT FILE_OPEN_IF
199 * O_CREAT | O_EXCL FILE_CREATE
200 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
201 * O_TRUNC FILE_OVERWRITE
202 * none of the above FILE_OPEN
203 *
204 * Note that there is not a direct match between disposition
205 * FILE_SUPERSEDE (ie create whether or not file exists although
206 * O_CREAT | O_TRUNC is similar but truncates the existing
207 * file rather than creating a new file as FILE_SUPERSEDE does
208 * (which uses the attributes / metadata passed in on open call)
209 *?
210 *? O_SYNC is a reasonable match to CIFS writethrough flag
211 *? and the read write flags match reasonably. O_LARGEFILE
212 *? is irrelevant because largefile support is always used
213 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
214 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
215 *********************************************************************/
216
217 disposition = cifs_get_disposition(f_flags);
218
219 /* BB pass O_SYNC flag through on file attributes .. BB */
220
221 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
222 if (!buf)
223 return -ENOMEM;
224
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500225 if (backup_cred(cifs_sb))
226 create_options |= CREATE_OPEN_BACKUP_INTENT;
227
Steve French1013e762017-09-22 01:40:27 -0500228 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
229 if (f_flags & O_SYNC)
230 create_options |= CREATE_WRITE_THROUGH;
231
232 if (f_flags & O_DIRECT)
233 create_options |= CREATE_NO_BUFFER;
234
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400235 oparms.tcon = tcon;
236 oparms.cifs_sb = cifs_sb;
237 oparms.desired_access = desired_access;
238 oparms.create_options = create_options;
239 oparms.disposition = disposition;
240 oparms.path = full_path;
241 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400242 oparms.reconnect = false;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400243
244 rc = server->ops->open(xid, &oparms, oplock, buf);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300245
246 if (rc)
247 goto out;
248
249 if (tcon->unix_ext)
250 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
251 xid);
252 else
253 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
Steve French42eacf92014-02-10 14:08:16 -0600254 xid, fid);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300255
256out:
257 kfree(buf);
258 return rc;
259}
260
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400261static bool
262cifs_has_mand_locks(struct cifsInodeInfo *cinode)
263{
264 struct cifs_fid_locks *cur;
265 bool has_locks = false;
266
267 down_read(&cinode->lock_sem);
268 list_for_each_entry(cur, &cinode->llist, llist) {
269 if (!list_empty(&cur->locks)) {
270 has_locks = true;
271 break;
272 }
273 }
274 up_read(&cinode->lock_sem);
275 return has_locks;
276}
277
Jeff Layton15ecb432010-10-15 15:34:02 -0400278struct cifsFileInfo *
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700279cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
Jeff Layton15ecb432010-10-15 15:34:02 -0400280 struct tcon_link *tlink, __u32 oplock)
281{
Goldwyn Rodrigues1f1735c2016-04-18 06:41:52 -0500282 struct dentry *dentry = file_dentry(file);
David Howells2b0143b2015-03-17 22:25:59 +0000283 struct inode *inode = d_inode(dentry);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700284 struct cifsInodeInfo *cinode = CIFS_I(inode);
285 struct cifsFileInfo *cfile;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700286 struct cifs_fid_locks *fdlocks;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700287 struct cifs_tcon *tcon = tlink_tcon(tlink);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400288 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Layton15ecb432010-10-15 15:34:02 -0400289
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700290 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
291 if (cfile == NULL)
292 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400293
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700294 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
295 if (!fdlocks) {
296 kfree(cfile);
297 return NULL;
298 }
299
300 INIT_LIST_HEAD(&fdlocks->locks);
301 fdlocks->cfile = cfile;
302 cfile->llist = fdlocks;
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700303 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700304 list_add(&fdlocks->llist, &cinode->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700305 up_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700306
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700307 cfile->count = 1;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700308 cfile->pid = current->tgid;
309 cfile->uid = current_fsuid();
310 cfile->dentry = dget(dentry);
311 cfile->f_flags = file->f_flags;
312 cfile->invalidHandle = false;
313 cfile->tlink = cifs_get_tlink(tlink);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700314 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700315 mutex_init(&cfile->fh_mutex);
Steve French3afca262016-09-22 18:58:16 -0500316 spin_lock_init(&cfile->file_info_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400317
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100318 cifs_sb_active(inode->i_sb);
319
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400320 /*
321 * If the server returned a read oplock and we have mandatory brlocks,
322 * set oplock level to None.
323 */
Pavel Shilovsky53ef1012013-09-05 16:11:28 +0400324 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500325 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400326 oplock = 0;
327 }
328
Steve French3afca262016-09-22 18:58:16 -0500329 spin_lock(&tcon->open_file_lock);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400330 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700331 oplock = fid->pending_open->oplock;
332 list_del(&fid->pending_open->olist);
333
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400334 fid->purge_cache = false;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400335 server->ops->set_fid(cfile, fid, oplock);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700336
337 list_add(&cfile->tlist, &tcon->openFileList);
Steve Frenchfae80442018-10-19 17:14:32 -0500338 atomic_inc(&tcon->num_local_opens);
Steve French3afca262016-09-22 18:58:16 -0500339
Jeff Layton15ecb432010-10-15 15:34:02 -0400340 /* if readable file instance put first in list*/
341 if (file->f_mode & FMODE_READ)
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700342 list_add(&cfile->flist, &cinode->openFileList);
Jeff Layton15ecb432010-10-15 15:34:02 -0400343 else
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700344 list_add_tail(&cfile->flist, &cinode->openFileList);
Steve French3afca262016-09-22 18:58:16 -0500345 spin_unlock(&tcon->open_file_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400346
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400347 if (fid->purge_cache)
Jeff Layton4f73c7d2014-04-30 09:31:47 -0400348 cifs_zap_mapping(inode);
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400349
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700350 file->private_data = cfile;
351 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400352}
353
Jeff Layton764a1b12012-07-25 14:59:54 -0400354struct cifsFileInfo *
355cifsFileInfo_get(struct cifsFileInfo *cifs_file)
356{
Steve French3afca262016-09-22 18:58:16 -0500357 spin_lock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400358 cifsFileInfo_get_locked(cifs_file);
Steve French3afca262016-09-22 18:58:16 -0500359 spin_unlock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400360 return cifs_file;
361}
362
Steve Frenchcdff08e2010-10-21 22:46:14 +0000363/*
364 * Release a reference on the file private data. This may involve closing
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400365 * the filehandle out on the server. Must be called without holding
Steve French3afca262016-09-22 18:58:16 -0500366 * tcon->open_file_lock and cifs_file->file_info_lock.
Steve Frenchcdff08e2010-10-21 22:46:14 +0000367 */
Jeff Laytonb33879a2010-10-15 15:34:04 -0400368void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
369{
David Howells2b0143b2015-03-17 22:25:59 +0000370 struct inode *inode = d_inode(cifs_file->dentry);
Steve French96daf2b2011-05-27 04:34:02 +0000371 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700372 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300373 struct cifsInodeInfo *cifsi = CIFS_I(inode);
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100374 struct super_block *sb = inode->i_sb;
375 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000376 struct cifsLockInfo *li, *tmp;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700377 struct cifs_fid fid;
378 struct cifs_pending_open open;
Sachin Prabhuca7df8e2015-01-15 12:22:04 +0000379 bool oplock_break_cancelled;
Steve Frenchcdff08e2010-10-21 22:46:14 +0000380
Steve French3afca262016-09-22 18:58:16 -0500381 spin_lock(&tcon->open_file_lock);
382
383 spin_lock(&cifs_file->file_info_lock);
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400384 if (--cifs_file->count > 0) {
Steve French3afca262016-09-22 18:58:16 -0500385 spin_unlock(&cifs_file->file_info_lock);
386 spin_unlock(&tcon->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000387 return;
Jeff Laytonb33879a2010-10-15 15:34:04 -0400388 }
Steve French3afca262016-09-22 18:58:16 -0500389 spin_unlock(&cifs_file->file_info_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000390
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700391 if (server->ops->get_lease_key)
392 server->ops->get_lease_key(inode, &fid);
393
394 /* store open in pending opens to make sure we don't miss lease break */
395 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
396
Steve Frenchcdff08e2010-10-21 22:46:14 +0000397 /* remove it from the lists */
398 list_del(&cifs_file->flist);
399 list_del(&cifs_file->tlist);
Steve Frenchfae80442018-10-19 17:14:32 -0500400 atomic_dec(&tcon->num_local_opens);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000401
402 if (list_empty(&cifsi->openFileList)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500403 cifs_dbg(FYI, "closing last open instance for inode %p\n",
David Howells2b0143b2015-03-17 22:25:59 +0000404 d_inode(cifs_file->dentry));
Pavel Shilovsky25364132012-09-18 16:20:27 -0700405 /*
406 * In strict cache mode we need invalidate mapping on the last
407 * close because it may cause a error when we open this file
408 * again and get at least level II oplock.
409 */
Pavel Shilovsky4f8ba8a2010-11-21 22:36:12 +0300410 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
Jeff Laytonaff8d5c2014-04-30 09:31:45 -0400411 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
Pavel Shilovskyc6723622010-11-03 10:58:57 +0300412 cifs_set_oplock_level(cifsi, 0);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000413 }
Steve French3afca262016-09-22 18:58:16 -0500414
415 spin_unlock(&tcon->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000416
Sachin Prabhuca7df8e2015-01-15 12:22:04 +0000417 oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
Jeff Laytonad635942011-07-26 12:20:17 -0400418
Steve Frenchcdff08e2010-10-21 22:46:14 +0000419 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700420 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400421 unsigned int xid;
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700422
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400423 xid = get_xid();
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700424 if (server->ops->close)
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +0400425 server->ops->close(xid, tcon, &cifs_file->fid);
426 _free_xid(xid);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000427 }
428
Sachin Prabhuca7df8e2015-01-15 12:22:04 +0000429 if (oplock_break_cancelled)
430 cifs_done_oplock_break(cifsi);
431
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700432 cifs_del_pending_open(&open);
433
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700434 /*
435 * Delete any outstanding lock records. We'll lose them when the file
Steve Frenchcdff08e2010-10-21 22:46:14 +0000436 * is closed anyway.
437 */
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700438 down_write(&cifsi->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700439 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
Steve Frenchcdff08e2010-10-21 22:46:14 +0000440 list_del(&li->llist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400441 cifs_del_lock_waiters(li);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000442 kfree(li);
443 }
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700444 list_del(&cifs_file->llist->llist);
445 kfree(cifs_file->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700446 up_write(&cifsi->lock_sem);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000447
448 cifs_put_tlink(cifs_file->tlink);
449 dput(cifs_file->dentry);
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100450 cifs_sb_deactive(sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000451 kfree(cifs_file);
Jeff Laytonb33879a2010-10-15 15:34:04 -0400452}
453
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454int cifs_open(struct inode *inode, struct file *file)
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700455
Linus Torvalds1da177e2005-04-16 15:20:36 -0700456{
457 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400458 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400459 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700460 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700461 struct TCP_Server_Info *server;
Steve French96daf2b2011-05-27 04:34:02 +0000462 struct cifs_tcon *tcon;
Jeff Layton7ffec372010-09-29 19:51:11 -0400463 struct tcon_link *tlink;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700464 struct cifsFileInfo *cfile = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 char *full_path = NULL;
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300466 bool posix_open_ok = false;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700467 struct cifs_fid fid;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700468 struct cifs_pending_open open;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400470 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471
472 cifs_sb = CIFS_SB(inode->i_sb);
Jeff Layton7ffec372010-09-29 19:51:11 -0400473 tlink = cifs_sb_tlink(cifs_sb);
474 if (IS_ERR(tlink)) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400475 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400476 return PTR_ERR(tlink);
477 }
478 tcon = tlink_tcon(tlink);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700479 server = tcon->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700480
Goldwyn Rodrigues1f1735c2016-04-18 06:41:52 -0500481 full_path = build_path_from_dentry(file_dentry(file));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482 if (full_path == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530483 rc = -ENOMEM;
Jeff Layton232341b2010-08-05 13:58:38 -0400484 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485 }
486
Joe Perchesf96637b2013-05-04 22:12:25 -0500487 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +0000488 inode, file->f_flags, full_path);
Steve French276a74a2009-03-03 18:00:34 +0000489
Namjae Jeon787aded2014-08-22 14:22:51 +0900490 if (file->f_flags & O_DIRECT &&
491 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
492 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
493 file->f_op = &cifs_file_direct_nobrl_ops;
494 else
495 file->f_op = &cifs_file_direct_ops;
496 }
497
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700498 if (server->oplocks)
Steve French276a74a2009-03-03 18:00:34 +0000499 oplock = REQ_OPLOCK;
500 else
501 oplock = 0;
502
Steve French64cc2c62009-03-04 19:54:08 +0000503 if (!tcon->broken_posix_open && tcon->unix_ext &&
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400504 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
505 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Steve French276a74a2009-03-03 18:00:34 +0000506 /* can not refresh inode info since size could be stale */
Jeff Layton2422f672010-06-16 13:40:16 -0400507 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
Steve Frenchfa588e02010-04-22 19:21:55 +0000508 cifs_sb->mnt_file_mode /* ignored */,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700509 file->f_flags, &oplock, &fid.netfid, xid);
Steve French276a74a2009-03-03 18:00:34 +0000510 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500511 cifs_dbg(FYI, "posix open succeeded\n");
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300512 posix_open_ok = true;
Steve French64cc2c62009-03-04 19:54:08 +0000513 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
514 if (tcon->ses->serverNOS)
Joe Perchesf96637b2013-05-04 22:12:25 -0500515 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
516 tcon->ses->serverName,
517 tcon->ses->serverNOS);
Steve French64cc2c62009-03-04 19:54:08 +0000518 tcon->broken_posix_open = true;
Steve French276a74a2009-03-03 18:00:34 +0000519 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
520 (rc != -EOPNOTSUPP)) /* path not found or net err */
521 goto out;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700522 /*
523 * Else fallthrough to retry open the old way on network i/o
524 * or DFS errors.
525 */
Steve French276a74a2009-03-03 18:00:34 +0000526 }
527
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700528 if (server->ops->get_lease_key)
529 server->ops->get_lease_key(inode, &fid);
530
531 cifs_add_pending_open(&fid, tlink, &open);
532
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300533 if (!posix_open_ok) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700534 if (server->ops->get_lease_key)
535 server->ops->get_lease_key(inode, &fid);
536
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300537 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700538 file->f_flags, &oplock, &fid, xid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700539 if (rc) {
540 cifs_del_pending_open(&open);
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300541 goto out;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700542 }
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300543 }
Jeff Layton47c78b72010-06-16 13:40:17 -0400544
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700545 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
546 if (cfile == NULL) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700547 if (server->ops->close)
548 server->ops->close(xid, tcon, &fid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700549 cifs_del_pending_open(&open);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 rc = -ENOMEM;
551 goto out;
552 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +0530554 cifs_fscache_set_inode_cookie(inode, file);
555
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300556 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700557 /*
558 * Time to set mode which we can not set earlier due to
559 * problems creating new read-only files.
560 */
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300561 struct cifs_unix_set_info_args args = {
562 .mode = inode->i_mode,
Eric W. Biederman49418b22013-02-06 00:57:56 -0800563 .uid = INVALID_UID, /* no change */
564 .gid = INVALID_GID, /* no change */
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300565 .ctime = NO_CHANGE_64,
566 .atime = NO_CHANGE_64,
567 .mtime = NO_CHANGE_64,
568 .device = 0,
569 };
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700570 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
571 cfile->pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572 }
573
574out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700575 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400576 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400577 cifs_put_tlink(tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578 return rc;
579}
580
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400581static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
582
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700583/*
584 * Try to reacquire byte range locks that were released when session
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400585 * to server was lost.
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700586 */
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400587static int
588cifs_relock_file(struct cifsFileInfo *cfile)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589{
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400590 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +0000591 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400592 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593 int rc = 0;
594
Rabin Vincent560d3882017-05-03 17:17:21 +0200595 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400596 if (cinode->can_cache_brlcks) {
Pavel Shilovsky689c3db2013-07-11 11:17:45 +0400597 /* can cache locks - no need to relock */
598 up_read(&cinode->lock_sem);
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400599 return rc;
600 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400602 if (cap_unix(tcon->ses) &&
603 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
604 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
605 rc = cifs_push_posix_locks(cfile);
606 else
607 rc = tcon->ses->server->ops->push_mand_locks(cfile);
608
Pavel Shilovsky689c3db2013-07-11 11:17:45 +0400609 up_read(&cinode->lock_sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610 return rc;
611}
612
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700613static int
614cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615{
616 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400617 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400618 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +0000620 struct cifs_tcon *tcon;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700621 struct TCP_Server_Info *server;
622 struct cifsInodeInfo *cinode;
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000623 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700624 char *full_path = NULL;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700625 int desired_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626 int disposition = FILE_OPEN;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500627 int create_options = CREATE_NOT_DIR;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400628 struct cifs_open_parms oparms;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400630 xid = get_xid();
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700631 mutex_lock(&cfile->fh_mutex);
632 if (!cfile->invalidHandle) {
633 mutex_unlock(&cfile->fh_mutex);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530634 rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400635 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530636 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637 }
638
David Howells2b0143b2015-03-17 22:25:59 +0000639 inode = d_inode(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640 cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700641 tcon = tlink_tcon(cfile->tlink);
642 server = tcon->ses->server;
Steve French3a9f4622007-04-04 17:10:24 +0000643
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700644 /*
645 * Can not grab rename sem here because various ops, including those
646 * that already have the rename sem can end up causing writepage to get
647 * called and if the server was down that means we end up here, and we
648 * can never tell if the caller already has the rename_sem.
649 */
650 full_path = build_path_from_dentry(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651 if (full_path == NULL) {
Steve French3a9f4622007-04-04 17:10:24 +0000652 rc = -ENOMEM;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700653 mutex_unlock(&cfile->fh_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400654 free_xid(xid);
Steve French3a9f4622007-04-04 17:10:24 +0000655 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656 }
657
Joe Perchesf96637b2013-05-04 22:12:25 -0500658 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
659 inode, cfile->f_flags, full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660
Pavel Shilovsky10b9b982012-03-20 12:55:09 +0300661 if (tcon->ses->server->oplocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662 oplock = REQ_OPLOCK;
663 else
Steve French4b18f2a2008-04-29 00:06:05 +0000664 oplock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400666 if (tcon->unix_ext && cap_unix(tcon->ses) &&
Steve French7fc8f4e2009-02-23 20:43:11 +0000667 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400668 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Jeff Layton608712f2010-10-15 15:33:56 -0400669 /*
670 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
671 * original open. Must mask them off for a reopen.
672 */
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700673 unsigned int oflags = cfile->f_flags &
Jeff Layton15886172010-10-15 15:33:59 -0400674 ~(O_CREAT | O_EXCL | O_TRUNC);
Jeff Layton608712f2010-10-15 15:33:56 -0400675
Jeff Layton2422f672010-06-16 13:40:16 -0400676 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700677 cifs_sb->mnt_file_mode /* ignored */,
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400678 oflags, &oplock, &cfile->fid.netfid, xid);
Steve French7fc8f4e2009-02-23 20:43:11 +0000679 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500680 cifs_dbg(FYI, "posix reopen succeeded\n");
Andi Shytife090e42013-07-29 20:04:35 +0200681 oparms.reconnect = true;
Steve French7fc8f4e2009-02-23 20:43:11 +0000682 goto reopen_success;
683 }
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700684 /*
685 * fallthrough to retry open the old way on errors, especially
686 * in the reconnect path it is important to retry hard
687 */
Steve French7fc8f4e2009-02-23 20:43:11 +0000688 }
689
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700690 desired_access = cifs_convert_flags(cfile->f_flags);
Steve French7fc8f4e2009-02-23 20:43:11 +0000691
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500692 if (backup_cred(cifs_sb))
693 create_options |= CREATE_OPEN_BACKUP_INTENT;
694
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700695 if (server->ops->get_lease_key)
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400696 server->ops->get_lease_key(inode, &cfile->fid);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700697
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400698 oparms.tcon = tcon;
699 oparms.cifs_sb = cifs_sb;
700 oparms.desired_access = desired_access;
701 oparms.create_options = create_options;
702 oparms.disposition = disposition;
703 oparms.path = full_path;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400704 oparms.fid = &cfile->fid;
705 oparms.reconnect = true;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400706
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700707 /*
708 * Can not refresh inode by passing in file_info buf to be returned by
Pavel Shilovskyd81b8a42014-01-16 15:53:36 +0400709 * ops->open and then calling get_inode_info with returned buf since
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700710 * file might have write behind data that needs to be flushed and server
711 * version of file size can be stale. If we knew for sure that inode was
712 * not dirty locally we could do this.
713 */
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400714 rc = server->ops->open(xid, &oparms, &oplock, NULL);
Pavel Shilovskyb33fcf12013-07-11 10:58:30 +0400715 if (rc == -ENOENT && oparms.reconnect == false) {
716 /* durable handle timeout is expired - open the file again */
717 rc = server->ops->open(xid, &oparms, &oplock, NULL);
718 /* indicate that we need to relock the file */
719 oparms.reconnect = true;
720 }
721
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722 if (rc) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700723 mutex_unlock(&cfile->fh_mutex);
Joe Perchesf96637b2013-05-04 22:12:25 -0500724 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
725 cifs_dbg(FYI, "oplock: %d\n", oplock);
Jeff Layton15886172010-10-15 15:33:59 -0400726 goto reopen_error_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 }
Jeff Layton15886172010-10-15 15:33:59 -0400728
729reopen_success:
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700730 cfile->invalidHandle = false;
731 mutex_unlock(&cfile->fh_mutex);
732 cinode = CIFS_I(inode);
Jeff Layton15886172010-10-15 15:33:59 -0400733
734 if (can_flush) {
735 rc = filemap_write_and_wait(inode->i_mapping);
Pavel Shilovsky9a663962019-01-08 11:15:28 -0800736 if (!is_interrupt_error(rc))
737 mapping_set_error(inode->i_mapping, rc);
Jeff Layton15886172010-10-15 15:33:59 -0400738
Jeff Layton15886172010-10-15 15:33:59 -0400739 if (tcon->unix_ext)
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700740 rc = cifs_get_inode_info_unix(&inode, full_path,
741 inode->i_sb, xid);
Jeff Layton15886172010-10-15 15:33:59 -0400742 else
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700743 rc = cifs_get_inode_info(&inode, full_path, NULL,
744 inode->i_sb, xid, NULL);
745 }
746 /*
747 * Else we are writing out data to server already and could deadlock if
748 * we tried to flush data, and since we do not know if we have data that
749 * would invalidate the current end of file on the server we can not go
750 * to the server to get the new inode info.
751 */
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300752
Pavel Shilovskyde740252016-10-11 15:34:07 -0700753 /*
754 * If the server returned a read oplock and we have mandatory brlocks,
755 * set oplock level to None.
756 */
757 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
758 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
759 oplock = 0;
760 }
761
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400762 server->ops->set_fid(cfile, &cfile->fid, oplock);
763 if (oparms.reconnect)
764 cifs_relock_file(cfile);
Jeff Layton15886172010-10-15 15:33:59 -0400765
766reopen_error_exit:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400768 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700769 return rc;
770}
771
772int cifs_close(struct inode *inode, struct file *file)
773{
Jeff Layton77970692011-04-05 16:23:47 -0700774 if (file->private_data != NULL) {
775 cifsFileInfo_put(file->private_data);
776 file->private_data = NULL;
777 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778
Steve Frenchcdff08e2010-10-21 22:46:14 +0000779 /* return code from the ->release op is always ignored */
780 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781}
782
Steve French52ace1e2016-09-22 19:23:56 -0500783void
784cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
785{
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700786 struct cifsFileInfo *open_file;
Steve French52ace1e2016-09-22 19:23:56 -0500787 struct list_head *tmp;
788 struct list_head *tmp1;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700789 struct list_head tmp_list;
790
Pavel Shilovsky96a988f2016-11-29 11:31:23 -0800791 if (!tcon->use_persistent || !tcon->need_reopen_files)
792 return;
793
794 tcon->need_reopen_files = false;
795
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700796 cifs_dbg(FYI, "Reopen persistent handles");
797 INIT_LIST_HEAD(&tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500798
799 /* list all files open on tree connection, reopen resilient handles */
800 spin_lock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700801 list_for_each(tmp, &tcon->openFileList) {
Steve French52ace1e2016-09-22 19:23:56 -0500802 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700803 if (!open_file->invalidHandle)
804 continue;
805 cifsFileInfo_get(open_file);
806 list_add_tail(&open_file->rlist, &tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500807 }
808 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700809
810 list_for_each_safe(tmp, tmp1, &tmp_list) {
811 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
Pavel Shilovsky96a988f2016-11-29 11:31:23 -0800812 if (cifs_reopen_file(open_file, false /* do not flush */))
813 tcon->need_reopen_files = true;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700814 list_del_init(&open_file->rlist);
815 cifsFileInfo_put(open_file);
816 }
Steve French52ace1e2016-09-22 19:23:56 -0500817}
818
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819int cifs_closedir(struct inode *inode, struct file *file)
820{
821 int rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400822 unsigned int xid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700823 struct cifsFileInfo *cfile = file->private_data;
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700824 struct cifs_tcon *tcon;
825 struct TCP_Server_Info *server;
826 char *buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827
Joe Perchesf96637b2013-05-04 22:12:25 -0500828 cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700830 if (cfile == NULL)
831 return rc;
832
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400833 xid = get_xid();
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700834 tcon = tlink_tcon(cfile->tlink);
835 server = tcon->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836
Joe Perchesf96637b2013-05-04 22:12:25 -0500837 cifs_dbg(FYI, "Freeing private data in close dir\n");
Steve French3afca262016-09-22 18:58:16 -0500838 spin_lock(&cfile->file_info_lock);
Pavel Shilovsky52755802014-08-18 20:49:57 +0400839 if (server->ops->dir_needs_close(cfile)) {
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700840 cfile->invalidHandle = true;
Steve French3afca262016-09-22 18:58:16 -0500841 spin_unlock(&cfile->file_info_lock);
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700842 if (server->ops->close_dir)
843 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
844 else
845 rc = -ENOSYS;
Joe Perchesf96637b2013-05-04 22:12:25 -0500846 cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700847 /* not much we can do if it fails anyway, ignore rc */
848 rc = 0;
849 } else
Steve French3afca262016-09-22 18:58:16 -0500850 spin_unlock(&cfile->file_info_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700852 buf = cfile->srch_inf.ntwrk_buf_start;
853 if (buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500854 cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700855 cfile->srch_inf.ntwrk_buf_start = NULL;
856 if (cfile->srch_inf.smallBuf)
857 cifs_small_buf_release(buf);
858 else
859 cifs_buf_release(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 }
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700861
862 cifs_put_tlink(cfile->tlink);
863 kfree(file->private_data);
864 file->private_data = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700865 /* BB can we lock the filestruct while this is going on? */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400866 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867 return rc;
868}
869
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400870static struct cifsLockInfo *
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000871cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000872{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400873 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000874 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400875 if (!lock)
876 return lock;
877 lock->offset = offset;
878 lock->length = length;
879 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400880 lock->pid = current->tgid;
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000881 lock->flags = flags;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400882 INIT_LIST_HEAD(&lock->blist);
883 init_waitqueue_head(&lock->block_q);
884 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400885}
886
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700887void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400888cifs_del_lock_waiters(struct cifsLockInfo *lock)
889{
890 struct cifsLockInfo *li, *tmp;
891 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
892 list_del_init(&li->blist);
893 wake_up(&li->block_q);
894 }
895}
896
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400897#define CIFS_LOCK_OP 0
898#define CIFS_READ_OP 1
899#define CIFS_WRITE_OP 2
900
901/* @rw_check : 0 - no op, 1 - read, 2 - write */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400902static bool
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700903cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000904 __u64 length, __u8 type, __u16 flags,
905 struct cifsFileInfo *cfile,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400906 struct cifsLockInfo **conf_lock, int rw_check)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400907{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300908 struct cifsLockInfo *li;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700909 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300910 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400911
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700912 list_for_each_entry(li, &fdlocks->locks, llist) {
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400913 if (offset + length <= li->offset ||
914 offset >= li->offset + li->length)
915 continue;
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400916 if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
917 server->ops->compare_fids(cfile, cur_cfile)) {
918 /* shared lock prevents write op through the same fid */
919 if (!(li->type & server->vals->shared_lock_type) ||
920 rw_check != CIFS_WRITE_OP)
921 continue;
922 }
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700923 if ((type & server->vals->shared_lock_type) &&
924 ((server->ops->compare_fids(cfile, cur_cfile) &&
925 current->tgid == li->pid) || type == li->type))
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400926 continue;
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000927 if (rw_check == CIFS_LOCK_OP &&
928 (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
929 server->ops->compare_fids(cfile, cur_cfile))
930 continue;
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700931 if (conf_lock)
932 *conf_lock = li;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700933 return true;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400934 }
935 return false;
936}
937
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700938bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300939cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000940 __u8 type, __u16 flags,
941 struct cifsLockInfo **conf_lock, int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400942{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300943 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700944 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +0000945 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300946
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700947 list_for_each_entry(cur, &cinode->llist, llist) {
948 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000949 flags, cfile, conf_lock,
950 rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300951 if (rc)
952 break;
953 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300954
955 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400956}
957
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300958/*
959 * Check if there is another lock that prevents us to set the lock (mandatory
960 * style). If such a lock exists, update the flock structure with its
961 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
962 * or leave it the same if we can't. Returns 0 if we don't need to request to
963 * the server or 1 otherwise.
964 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400965static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300966cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
967 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400968{
969 int rc = 0;
970 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +0000971 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300972 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400973 bool exist;
974
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700975 down_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400976
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300977 exist = cifs_find_lock_conflict(cfile, offset, length, type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +1000978 flock->fl_flags, &conf_lock,
979 CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400980 if (exist) {
981 flock->fl_start = conf_lock->offset;
982 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
983 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300984 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400985 flock->fl_type = F_RDLCK;
986 else
987 flock->fl_type = F_WRLCK;
988 } else if (!cinode->can_cache_brlcks)
989 rc = 1;
990 else
991 flock->fl_type = F_UNLCK;
992
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700993 up_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400994 return rc;
995}
996
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400997static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300998cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400999{
David Howells2b0143b2015-03-17 22:25:59 +00001000 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001001 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001002 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001003 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001004}
1005
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001006/*
1007 * Set the byte-range lock (mandatory style). Returns:
1008 * 1) 0, if we set the lock and don't need to request to the server;
1009 * 2) 1, if no locks prevent us but we need to request to the server;
Colin Ian King413d6102018-10-26 19:07:21 +01001010 * 3) -EACCES, if there is a lock that prevents us and wait is false.
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001011 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001012static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001013cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001014 bool wait)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001015{
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001016 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +00001017 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001018 bool exist;
1019 int rc = 0;
1020
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001021try_again:
1022 exist = false;
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001023 down_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001024
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001025 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001026 lock->type, lock->flags, &conf_lock,
1027 CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001028 if (!exist && cinode->can_cache_brlcks) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001029 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001030 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001031 return rc;
1032 }
1033
1034 if (!exist)
1035 rc = 1;
1036 else if (!wait)
1037 rc = -EACCES;
1038 else {
1039 list_add_tail(&lock->blist, &conf_lock->blist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001040 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001041 rc = wait_event_interruptible(lock->block_q,
1042 (lock->blist.prev == &lock->blist) &&
1043 (lock->blist.next == &lock->blist));
1044 if (!rc)
1045 goto try_again;
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001046 down_write(&cinode->lock_sem);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001047 list_del_init(&lock->blist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001048 }
1049
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001050 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001051 return rc;
1052}
1053
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001054/*
1055 * Check if there is another lock that prevents us to set the lock (posix
1056 * style). If such a lock exists, update the flock structure with its
1057 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1058 * or leave it the same if we can't. Returns 0 if we don't need to request to
1059 * the server or 1 otherwise.
1060 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001061static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001062cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1063{
1064 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001065 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001066 unsigned char saved_type = flock->fl_type;
1067
Pavel Shilovsky50792762011-10-29 17:17:57 +04001068 if ((flock->fl_flags & FL_POSIX) == 0)
1069 return 1;
1070
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001071 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001072 posix_test_lock(file, flock);
1073
1074 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1075 flock->fl_type = saved_type;
1076 rc = 1;
1077 }
1078
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001079 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001080 return rc;
1081}
1082
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001083/*
1084 * Set the byte-range lock (posix style). Returns:
1085 * 1) 0, if we set the lock and don't need to request to the server;
1086 * 2) 1, if we need to request to the server;
1087 * 3) <0, if the error occurs while setting the lock.
1088 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001089static int
1090cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1091{
Al Viro496ad9a2013-01-23 17:07:38 -05001092 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001093 int rc = 1;
1094
1095 if ((flock->fl_flags & FL_POSIX) == 0)
1096 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001097
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001098try_again:
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001099 down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001100 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001101 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001102 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001103 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001104
1105 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001106 up_write(&cinode->lock_sem);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001107 if (rc == FILE_LOCK_DEFERRED) {
NeilBrownada5c1d2018-11-30 10:04:08 +11001108 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001109 if (!rc)
1110 goto try_again;
NeilBrowncb03f942018-11-30 10:04:08 +11001111 locks_delete_block(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001112 }
Steve French9ebb3892012-04-01 13:52:54 -05001113 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001114}
1115
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001116int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001117cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001118{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001119 unsigned int xid;
1120 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001121 struct cifsLockInfo *li, *tmp;
1122 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001123 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001124 LOCKING_ANDX_RANGE *buf, *cur;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001125 static const int types[] = {
1126 LOCKING_ANDX_LARGE_FILES,
1127 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1128 };
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001129 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001130
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001131 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001132 tcon = tlink_tcon(cfile->tlink);
1133
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001134 /*
1135 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001136 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001137 */
1138 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001139 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001140 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001141 return -EINVAL;
1142 }
1143
Ross Lagerwall92a81092019-01-08 18:30:56 +00001144 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1145 PAGE_SIZE);
1146 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1147 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001148 max_num = (max_buf - sizeof(struct smb_hdr)) /
1149 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001150 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001151 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001152 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001153 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001154 }
1155
1156 for (i = 0; i < 2; i++) {
1157 cur = buf;
1158 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001159 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001160 if (li->type != types[i])
1161 continue;
1162 cur->Pid = cpu_to_le16(li->pid);
1163 cur->LengthLow = cpu_to_le32((u32)li->length);
1164 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1165 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1166 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1167 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001168 stored_rc = cifs_lockv(xid, tcon,
1169 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001170 (__u8)li->type, 0, num,
1171 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001172 if (stored_rc)
1173 rc = stored_rc;
1174 cur = buf;
1175 num = 0;
1176 } else
1177 cur++;
1178 }
1179
1180 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001181 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001182 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001183 if (stored_rc)
1184 rc = stored_rc;
1185 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001186 }
1187
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001188 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001189 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001190 return rc;
1191}
1192
Jeff Layton3d224622016-05-24 06:27:44 -04001193static __u32
1194hash_lockowner(fl_owner_t owner)
1195{
1196 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1197}
1198
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001199struct lock_to_push {
1200 struct list_head llist;
1201 __u64 offset;
1202 __u64 length;
1203 __u32 pid;
1204 __u16 netfid;
1205 __u8 type;
1206};
1207
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001208static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001209cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001210{
David Howells2b0143b2015-03-17 22:25:59 +00001211 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001212 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001213 struct file_lock *flock;
1214 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001215 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001216 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001217 struct list_head locks_to_send, *el;
1218 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001219 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001220
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001221 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001222
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001223 if (!flctx)
1224 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001225
Jeff Laytone084c1b2015-02-16 14:32:03 -05001226 spin_lock(&flctx->flc_lock);
1227 list_for_each(el, &flctx->flc_posix) {
1228 count++;
1229 }
1230 spin_unlock(&flctx->flc_lock);
1231
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001232 INIT_LIST_HEAD(&locks_to_send);
1233
1234 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001235 * Allocating count locks is enough because no FL_POSIX locks can be
1236 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001237 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001238 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001239 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001240 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1241 if (!lck) {
1242 rc = -ENOMEM;
1243 goto err_out;
1244 }
1245 list_add_tail(&lck->llist, &locks_to_send);
1246 }
1247
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001248 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001249 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001250 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001251 if (el == &locks_to_send) {
1252 /*
1253 * The list ended. We don't have enough allocated
1254 * structures - something is really wrong.
1255 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001256 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001257 break;
1258 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001259 length = 1 + flock->fl_end - flock->fl_start;
1260 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1261 type = CIFS_RDLCK;
1262 else
1263 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001264 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001265 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001266 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001267 lck->length = length;
1268 lck->type = type;
1269 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001270 }
Jeff Layton6109c852015-01-16 15:05:57 -05001271 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001272
1273 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001274 int stored_rc;
1275
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001276 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001277 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001278 lck->type, 0);
1279 if (stored_rc)
1280 rc = stored_rc;
1281 list_del(&lck->llist);
1282 kfree(lck);
1283 }
1284
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001285out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001286 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001287 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001288err_out:
1289 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1290 list_del(&lck->llist);
1291 kfree(lck);
1292 }
1293 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001294}
1295
1296static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001297cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001298{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001299 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001300 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001301 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001302 int rc = 0;
1303
1304 /* we are going to update can_cache_brlcks here - need a write access */
1305 down_write(&cinode->lock_sem);
1306 if (!cinode->can_cache_brlcks) {
1307 up_write(&cinode->lock_sem);
1308 return rc;
1309 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001310
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001311 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001312 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1313 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001314 rc = cifs_push_posix_locks(cfile);
1315 else
1316 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001317
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001318 cinode->can_cache_brlcks = false;
1319 up_write(&cinode->lock_sem);
1320 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001321}
1322
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001323static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001324cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001325 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001327 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001328 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001329 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001330 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001331 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001332 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001333 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001335 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001336 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001337 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001338 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001339 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001340 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001341 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001342 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001344 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001345 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001346 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001347 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001348 *lock = 1;
1349 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001350 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001351 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001352 *unlock = 1;
1353 /* Check if unlock includes more than one lock range */
1354 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001355 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001356 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001357 *lock = 1;
1358 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001359 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001360 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001361 *lock = 1;
1362 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001363 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001364 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001365 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001367 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001368}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001370static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001371cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001372 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001373{
1374 int rc = 0;
1375 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001376 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1377 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001378 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001379 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001381 if (posix_lck) {
1382 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001383
1384 rc = cifs_posix_lock_test(file, flock);
1385 if (!rc)
1386 return rc;
1387
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001388 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001389 posix_lock_type = CIFS_RDLCK;
1390 else
1391 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001392 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1393 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001394 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001395 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 return rc;
1397 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001398
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001399 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001400 if (!rc)
1401 return rc;
1402
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001403 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001404 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1405 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001406 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001407 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1408 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001409 flock->fl_type = F_UNLCK;
1410 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001411 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1412 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001413 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001414 }
1415
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001416 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001417 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001418 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001419 }
1420
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001421 type &= ~server->vals->exclusive_lock_type;
1422
1423 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1424 type | server->vals->shared_lock_type,
1425 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001426 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001427 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1428 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001429 flock->fl_type = F_RDLCK;
1430 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001431 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1432 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001433 } else
1434 flock->fl_type = F_WRLCK;
1435
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001436 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001437}
1438
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001439void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001440cifs_move_llist(struct list_head *source, struct list_head *dest)
1441{
1442 struct list_head *li, *tmp;
1443 list_for_each_safe(li, tmp, source)
1444 list_move(li, dest);
1445}
1446
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001447void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001448cifs_free_llist(struct list_head *llist)
1449{
1450 struct cifsLockInfo *li, *tmp;
1451 list_for_each_entry_safe(li, tmp, llist, llist) {
1452 cifs_del_lock_waiters(li);
1453 list_del(&li->llist);
1454 kfree(li);
1455 }
1456}
1457
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001458int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001459cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1460 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001461{
1462 int rc = 0, stored_rc;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001463 static const int types[] = {
1464 LOCKING_ANDX_LARGE_FILES,
1465 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1466 };
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001467 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001468 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001469 LOCKING_ANDX_RANGE *buf, *cur;
1470 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001471 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001472 struct cifsLockInfo *li, *tmp;
1473 __u64 length = 1 + flock->fl_end - flock->fl_start;
1474 struct list_head tmp_llist;
1475
1476 INIT_LIST_HEAD(&tmp_llist);
1477
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001478 /*
1479 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001480 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001481 */
1482 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001483 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001484 return -EINVAL;
1485
Ross Lagerwall92a81092019-01-08 18:30:56 +00001486 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1487 PAGE_SIZE);
1488 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1489 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001490 max_num = (max_buf - sizeof(struct smb_hdr)) /
1491 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001492 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001493 if (!buf)
1494 return -ENOMEM;
1495
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001496 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001497 for (i = 0; i < 2; i++) {
1498 cur = buf;
1499 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001500 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001501 if (flock->fl_start > li->offset ||
1502 (flock->fl_start + length) <
1503 (li->offset + li->length))
1504 continue;
1505 if (current->tgid != li->pid)
1506 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001507 if (types[i] != li->type)
1508 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001509 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001510 /*
1511 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001512 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001513 */
1514 list_del(&li->llist);
1515 cifs_del_lock_waiters(li);
1516 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001517 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001518 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001519 cur->Pid = cpu_to_le16(li->pid);
1520 cur->LengthLow = cpu_to_le32((u32)li->length);
1521 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1522 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1523 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1524 /*
1525 * We need to save a lock here to let us add it again to
1526 * the file's list if the unlock range request fails on
1527 * the server.
1528 */
1529 list_move(&li->llist, &tmp_llist);
1530 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001531 stored_rc = cifs_lockv(xid, tcon,
1532 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001533 li->type, num, 0, buf);
1534 if (stored_rc) {
1535 /*
1536 * We failed on the unlock range
1537 * request - add all locks from the tmp
1538 * list to the head of the file's list.
1539 */
1540 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001541 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001542 rc = stored_rc;
1543 } else
1544 /*
1545 * The unlock range request succeed -
1546 * free the tmp list.
1547 */
1548 cifs_free_llist(&tmp_llist);
1549 cur = buf;
1550 num = 0;
1551 } else
1552 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001553 }
1554 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001555 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001556 types[i], num, 0, buf);
1557 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001558 cifs_move_llist(&tmp_llist,
1559 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001560 rc = stored_rc;
1561 } else
1562 cifs_free_llist(&tmp_llist);
1563 }
1564 }
1565
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001566 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001567 kfree(buf);
1568 return rc;
1569}
1570
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001571static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001572cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001573 bool wait_flag, bool posix_lck, int lock, int unlock,
1574 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001575{
1576 int rc = 0;
1577 __u64 length = 1 + flock->fl_end - flock->fl_start;
1578 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1579 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001580 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001581 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001582
1583 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001584 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001585
1586 rc = cifs_posix_lock_set(file, flock);
1587 if (!rc || rc < 0)
1588 return rc;
1589
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001590 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001591 posix_lock_type = CIFS_RDLCK;
1592 else
1593 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001594
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001595 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001596 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001597
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001598 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001599 hash_lockowner(flock->fl_owner),
1600 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001601 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001602 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001603 }
1604
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001605 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001606 struct cifsLockInfo *lock;
1607
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001608 lock = cifs_lock_init(flock->fl_start, length, type,
1609 flock->fl_flags);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001610 if (!lock)
1611 return -ENOMEM;
1612
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001613 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001614 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001615 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001616 return rc;
1617 }
1618 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001619 goto out;
1620
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001621 /*
1622 * Windows 7 server can delay breaking lease from read to None
1623 * if we set a byte-range lock on a file - break it explicitly
1624 * before sending the lock to the server to be sure the next
1625 * read won't conflict with non-overlapted locks due to
1626 * pagereading.
1627 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001628 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1629 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001630 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001631 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1632 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001633 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001634 }
1635
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001636 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1637 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001638 if (rc) {
1639 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001640 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001641 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001642
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001643 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001644 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001645 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001646
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001647out:
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001648 if (flock->fl_flags & FL_POSIX) {
1649 /*
1650 * If this is a request to remove all locks because we
1651 * are closing the file, it doesn't matter if the
1652 * unlocking failed as both cifs.ko and the SMB server
1653 * remove the lock on file close
1654 */
1655 if (rc) {
1656 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1657 if (!(flock->fl_flags & FL_CLOSE))
1658 return rc;
1659 }
Benjamin Coddington4f656362015-10-22 13:38:14 -04001660 rc = locks_lock_file_wait(file, flock);
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001661 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001662 return rc;
1663}
1664
1665int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1666{
1667 int rc, xid;
1668 int lock = 0, unlock = 0;
1669 bool wait_flag = false;
1670 bool posix_lck = false;
1671 struct cifs_sb_info *cifs_sb;
1672 struct cifs_tcon *tcon;
1673 struct cifsInodeInfo *cinode;
1674 struct cifsFileInfo *cfile;
1675 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001676 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001677
1678 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001679 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001680
Joe Perchesf96637b2013-05-04 22:12:25 -05001681 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1682 cmd, flock->fl_flags, flock->fl_type,
1683 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001684
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001685 cfile = (struct cifsFileInfo *)file->private_data;
1686 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001687
1688 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1689 tcon->ses->server);
Al Viro7119e222014-10-22 00:25:12 -04001690 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001691 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001692 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001693
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001694 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001695 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1696 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1697 posix_lck = true;
1698 /*
1699 * BB add code here to normalize offset and length to account for
1700 * negative length which we can not accept over the wire.
1701 */
1702 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001703 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001704 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001705 return rc;
1706 }
1707
1708 if (!lock && !unlock) {
1709 /*
1710 * if no lock or unlock then nothing to do since we do not
1711 * know what it is
1712 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001713 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001714 return -EOPNOTSUPP;
1715 }
1716
1717 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1718 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001719 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 return rc;
1721}
1722
Jeff Layton597b0272012-03-23 14:40:56 -04001723/*
1724 * update the file size (if needed) after a write. Should be called with
1725 * the inode->i_lock held
1726 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001727void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001728cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1729 unsigned int bytes_written)
1730{
1731 loff_t end_of_write = offset + bytes_written;
1732
1733 if (end_of_write > cifsi->server_eof)
1734 cifsi->server_eof = end_of_write;
1735}
1736
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001737static ssize_t
1738cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1739 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740{
1741 int rc = 0;
1742 unsigned int bytes_written = 0;
1743 unsigned int total_written;
1744 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001745 struct cifs_tcon *tcon;
1746 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001747 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001748 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001749 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001750 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
Jeff Layton7da4b492010-10-15 15:34:00 -04001752 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753
Al Viro35c265e2014-08-19 20:25:34 -04001754 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1755 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001757 tcon = tlink_tcon(open_file->tlink);
1758 server = tcon->ses->server;
1759
1760 if (!server->ops->sync_write)
1761 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001762
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001763 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 for (total_written = 0; write_size > total_written;
1766 total_written += bytes_written) {
1767 rc = -EAGAIN;
1768 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001769 struct kvec iov[2];
1770 unsigned int len;
1771
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 /* we could deadlock if we called
1774 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001775 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001777 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 if (rc != 0)
1779 break;
1780 }
Steve French3e844692005-10-03 13:37:24 -07001781
David Howells2b0143b2015-03-17 22:25:59 +00001782 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001783 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001784 /* iov[0] is reserved for smb header */
1785 iov[1].iov_base = (char *)write_data + total_written;
1786 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001787 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001788 io_parms.tcon = tcon;
1789 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001790 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001791 rc = server->ops->sync_write(xid, &open_file->fid,
1792 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 }
1794 if (rc || (bytes_written == 0)) {
1795 if (total_written)
1796 break;
1797 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001798 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 return rc;
1800 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001801 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001802 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001803 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001804 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001805 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001806 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 }
1808
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001809 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
Jeff Layton7da4b492010-10-15 15:34:00 -04001811 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001812 spin_lock(&d_inode(dentry)->i_lock);
1813 if (*offset > d_inode(dentry)->i_size)
1814 i_size_write(d_inode(dentry), *offset);
1815 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 }
David Howells2b0143b2015-03-17 22:25:59 +00001817 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001818 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 return total_written;
1820}
1821
Jeff Layton6508d902010-09-29 19:51:11 -04001822struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1823 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001824{
1825 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001826 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001827 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Layton6508d902010-09-29 19:51:11 -04001828
1829 /* only filter by fsuid on multiuser mounts */
1830 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1831 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001832
Steve French3afca262016-09-22 18:58:16 -05001833 spin_lock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001834 /* we could simply get the first_list_entry since write-only entries
1835 are always at the end of the list but since the first entry might
1836 have a close pending, we go through the whole list */
1837 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001838 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001839 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001840 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001841 if (!open_file->invalidHandle) {
1842 /* found a good file */
1843 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001844 cifsFileInfo_get(open_file);
1845 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001846 return open_file;
1847 } /* else might as well continue, and look for
1848 another, or simply have the caller reopen it
1849 again rather than trying to fix this handle */
1850 } else /* write only file */
1851 break; /* write only files are last so must be done */
1852 }
Steve French3afca262016-09-22 18:58:16 -05001853 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001854 return NULL;
1855}
Steve French630f3f0c2007-10-25 21:17:17 +00001856
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001857/* Return -EBADF if no handle is found and general rc otherwise */
1858int
1859cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
1860 struct cifsFileInfo **ret_file)
Steve French6148a742005-10-05 12:23:19 -07001861{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001862 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001863 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001864 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001865 bool any_available = false;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001866 int rc = -EBADF;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001867 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001868
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001869 *ret_file = NULL;
1870
1871 /*
1872 * Having a null inode here (because mapping->host was set to zero by
1873 * the VFS or MM) should not happen but we had reports of on oops (due
1874 * to it being zero) during stress testcases so we need to check for it
1875 */
Steve French60808232006-04-22 15:53:05 +00001876
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001877 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001878 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001879 dump_stack();
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001880 return rc;
Steve French60808232006-04-22 15:53:05 +00001881 }
1882
Jeff Laytond3892292010-11-02 16:22:50 -04001883 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001884 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001885
Jeff Layton6508d902010-09-29 19:51:11 -04001886 /* only filter by fsuid on multiuser mounts */
1887 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1888 fsuid_only = false;
1889
Steve French3afca262016-09-22 18:58:16 -05001890 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001891refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001892 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001893 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001894 return rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001895 }
Steve French6148a742005-10-05 12:23:19 -07001896 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001897 if (!any_available && open_file->pid != current->tgid)
1898 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001899 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001900 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001901 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001902 if (!open_file->invalidHandle) {
1903 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001904 cifsFileInfo_get(open_file);
1905 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001906 *ret_file = open_file;
1907 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001908 } else {
1909 if (!inv_file)
1910 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001911 }
Steve French6148a742005-10-05 12:23:19 -07001912 }
1913 }
Jeff Layton2846d382008-09-22 21:33:33 -04001914 /* couldn't find useable FH with same pid, try any available */
1915 if (!any_available) {
1916 any_available = true;
1917 goto refind_writable;
1918 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001919
1920 if (inv_file) {
1921 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001922 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001923 }
1924
Steve French3afca262016-09-22 18:58:16 -05001925 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001926
1927 if (inv_file) {
1928 rc = cifs_reopen_file(inv_file, false);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001929 if (!rc) {
1930 *ret_file = inv_file;
1931 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001932 }
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001933
1934 spin_lock(&tcon->open_file_lock);
1935 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
1936 spin_unlock(&tcon->open_file_lock);
1937 cifsFileInfo_put(inv_file);
1938 ++refind;
1939 inv_file = NULL;
1940 spin_lock(&tcon->open_file_lock);
1941 goto refind_writable;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001942 }
1943
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001944 return rc;
1945}
1946
1947struct cifsFileInfo *
1948find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
1949{
1950 struct cifsFileInfo *cfile;
1951 int rc;
1952
1953 rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
1954 if (rc)
1955 cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
1956
1957 return cfile;
Steve French6148a742005-10-05 12:23:19 -07001958}
1959
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1961{
1962 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001963 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 char *write_data;
1965 int rc = -EFAULT;
1966 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001968 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
1970 if (!mapping || !mapping->host)
1971 return -EFAULT;
1972
1973 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974
1975 offset += (loff_t)from;
1976 write_data = kmap(page);
1977 write_data += from;
1978
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001979 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 kunmap(page);
1981 return -EIO;
1982 }
1983
1984 /* racing with truncate? */
1985 if (offset > mapping->host->i_size) {
1986 kunmap(page);
1987 return 0; /* don't care */
1988 }
1989
1990 /* check to make sure that we are not extending the file */
1991 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001992 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001994 rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
1995 if (!rc) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001996 bytes_written = cifs_write(open_file, open_file->pid,
1997 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001998 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07002000 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002001 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07002002 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002003 else if (bytes_written < 0)
2004 rc = bytes_written;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002005 else
2006 rc = -EFAULT;
Steve French6148a742005-10-05 12:23:19 -07002007 } else {
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002008 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2009 if (!is_retryable_error(rc))
2010 rc = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 }
2012
2013 kunmap(page);
2014 return rc;
2015}
2016
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002017static struct cifs_writedata *
2018wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2019 pgoff_t end, pgoff_t *index,
2020 unsigned int *found_pages)
2021{
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002022 struct cifs_writedata *wdata;
2023
2024 wdata = cifs_writedata_alloc((unsigned int)tofind,
2025 cifs_writev_complete);
2026 if (!wdata)
2027 return NULL;
2028
Jan Kara9c19a9c2017-11-15 17:35:26 -08002029 *found_pages = find_get_pages_range_tag(mapping, index, end,
2030 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002031 return wdata;
2032}
2033
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002034static unsigned int
2035wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2036 struct address_space *mapping,
2037 struct writeback_control *wbc,
2038 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2039{
2040 unsigned int nr_pages = 0, i;
2041 struct page *page;
2042
2043 for (i = 0; i < found_pages; i++) {
2044 page = wdata->pages[i];
2045 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002046 * At this point we hold neither the i_pages lock nor the
2047 * page lock: the page may be truncated or invalidated
2048 * (changing page->mapping to NULL), or even swizzled
2049 * back from swapper_space to tmpfs file mapping
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002050 */
2051
2052 if (nr_pages == 0)
2053 lock_page(page);
2054 else if (!trylock_page(page))
2055 break;
2056
2057 if (unlikely(page->mapping != mapping)) {
2058 unlock_page(page);
2059 break;
2060 }
2061
2062 if (!wbc->range_cyclic && page->index > end) {
2063 *done = true;
2064 unlock_page(page);
2065 break;
2066 }
2067
2068 if (*next && (page->index != *next)) {
2069 /* Not next consecutive page */
2070 unlock_page(page);
2071 break;
2072 }
2073
2074 if (wbc->sync_mode != WB_SYNC_NONE)
2075 wait_on_page_writeback(page);
2076
2077 if (PageWriteback(page) ||
2078 !clear_page_dirty_for_io(page)) {
2079 unlock_page(page);
2080 break;
2081 }
2082
2083 /*
2084 * This actually clears the dirty bit in the radix tree.
2085 * See cifs_writepage() for more commentary.
2086 */
2087 set_page_writeback(page);
2088 if (page_offset(page) >= i_size_read(mapping->host)) {
2089 *done = true;
2090 unlock_page(page);
2091 end_page_writeback(page);
2092 break;
2093 }
2094
2095 wdata->pages[i] = page;
2096 *next = page->index + 1;
2097 ++nr_pages;
2098 }
2099
2100 /* reset index to refind any pages skipped */
2101 if (nr_pages == 0)
2102 *index = wdata->pages[0]->index + 1;
2103
2104 /* put any pages we aren't going to use */
2105 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002106 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002107 wdata->pages[i] = NULL;
2108 }
2109
2110 return nr_pages;
2111}
2112
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002113static int
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002114wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2115 struct address_space *mapping, struct writeback_control *wbc)
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002116{
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002117 int rc;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002118 struct TCP_Server_Info *server =
2119 tlink_tcon(wdata->cfile->tlink)->ses->server;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002120
2121 wdata->sync_mode = wbc->sync_mode;
2122 wdata->nr_pages = nr_pages;
2123 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002124 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002125 wdata->tailsz = min(i_size_read(mapping->host) -
2126 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002127 (loff_t)PAGE_SIZE);
2128 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002129 wdata->pid = wdata->cfile->pid;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002130
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002131 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2132 if (rc)
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002133 return rc;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002134
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002135 if (wdata->cfile->invalidHandle)
2136 rc = -EAGAIN;
2137 else
2138 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002139
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002140 return rc;
2141}
2142
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002144 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145{
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002146 struct inode *inode = mapping->host;
2147 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002148 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002149 bool done = false, scanned = false, range_whole = false;
2150 pgoff_t end, index;
2151 struct cifs_writedata *wdata;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002152 struct cifsFileInfo *cfile = NULL;
Steve French37c0eb42005-10-05 14:50:29 -07002153 int rc = 0;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002154 int saved_rc = 0;
Steve French0cb012d2018-10-11 01:01:02 -05002155 unsigned int xid;
Steve French50c2f752007-07-13 00:33:32 +00002156
Steve French37c0eb42005-10-05 14:50:29 -07002157 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002158 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002159 * one page at a time via cifs_writepage
2160 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002161 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002162 return generic_writepages(mapping, wbc);
2163
Steve French0cb012d2018-10-11 01:01:02 -05002164 xid = get_xid();
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002165 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002166 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002167 end = -1;
2168 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002169 index = wbc->range_start >> PAGE_SHIFT;
2170 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002171 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002172 range_whole = true;
2173 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002174 }
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002175 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
Steve French37c0eb42005-10-05 14:50:29 -07002176retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002177 while (!done && index <= end) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002178 unsigned int i, nr_pages, found_pages, wsize;
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002179 pgoff_t next = 0, tofind, saved_index = index;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002180 struct cifs_credits credits_on_stack;
2181 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002182 int get_file_rc = 0;
Steve French37c0eb42005-10-05 14:50:29 -07002183
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002184 if (cfile)
2185 cifsFileInfo_put(cfile);
2186
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002187 rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
2188
2189 /* in case of an error store it to return later */
2190 if (rc)
2191 get_file_rc = rc;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002192
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002193 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002194 &wsize, credits);
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002195 if (rc != 0) {
2196 done = true;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002197 break;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002198 }
Steve French37c0eb42005-10-05 14:50:29 -07002199
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002200 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07002201
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002202 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2203 &found_pages);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002204 if (!wdata) {
2205 rc = -ENOMEM;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002206 done = true;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002207 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002208 break;
2209 }
2210
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002211 if (found_pages == 0) {
2212 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002213 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002214 break;
2215 }
2216
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002217 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2218 end, &index, &next, &done);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002219
2220 /* nothing to write? */
2221 if (nr_pages == 0) {
2222 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002223 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002224 continue;
2225 }
2226
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002227 wdata->credits = credits_on_stack;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002228 wdata->cfile = cfile;
2229 cfile = NULL;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002230
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002231 if (!wdata->cfile) {
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002232 cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
2233 get_file_rc);
2234 if (is_retryable_error(get_file_rc))
2235 rc = get_file_rc;
2236 else
2237 rc = -EBADF;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002238 } else
2239 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
Jeff Layton941b8532011-01-11 07:24:01 -05002240
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002241 for (i = 0; i < nr_pages; ++i)
2242 unlock_page(wdata->pages[i]);
2243
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002244 /* send failure -- clean up the mess */
2245 if (rc != 0) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002246 add_credits_and_wake_if(server, &wdata->credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002247 for (i = 0; i < nr_pages; ++i) {
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002248 if (is_retryable_error(rc))
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002249 redirty_page_for_writepage(wbc,
2250 wdata->pages[i]);
2251 else
2252 SetPageError(wdata->pages[i]);
2253 end_page_writeback(wdata->pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002254 put_page(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002255 }
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002256 if (!is_retryable_error(rc))
Jeff Layton941b8532011-01-11 07:24:01 -05002257 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002258 }
2259 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002260
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002261 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2262 index = saved_index;
2263 continue;
2264 }
2265
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002266 /* Return immediately if we received a signal during writing */
2267 if (is_interrupt_error(rc)) {
2268 done = true;
2269 break;
2270 }
2271
2272 if (rc != 0 && saved_rc == 0)
2273 saved_rc = rc;
2274
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002275 wbc->nr_to_write -= nr_pages;
2276 if (wbc->nr_to_write <= 0)
2277 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002278
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002279 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002280 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002281
Steve French37c0eb42005-10-05 14:50:29 -07002282 if (!scanned && !done) {
2283 /*
2284 * We hit the last page and there is more work to be done: wrap
2285 * back to the start of the file
2286 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002287 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002288 index = 0;
2289 goto retry;
2290 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002291
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002292 if (saved_rc != 0)
2293 rc = saved_rc;
2294
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002295 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002296 mapping->writeback_index = index;
2297
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002298 if (cfile)
2299 cifsFileInfo_put(cfile);
Steve French0cb012d2018-10-11 01:01:02 -05002300 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 return rc;
2302}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002304static int
2305cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002307 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002308 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002310 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311/* BB add check for wbc flags */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002312 get_page(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002313 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002314 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002315
2316 /*
2317 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2318 *
2319 * A writepage() implementation always needs to do either this,
2320 * or re-dirty the page with "redirty_page_for_writepage()" in
2321 * the case of a failure.
2322 *
2323 * Just unlocking the page will cause the radix tree tag-bits
2324 * to fail to update with the state of the page correctly.
2325 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002326 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002327retry_write:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002328 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002329 if (is_retryable_error(rc)) {
2330 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
Jeff Layton97b37f22017-05-25 06:59:52 -04002331 goto retry_write;
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002332 redirty_page_for_writepage(wbc, page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002333 } else if (rc != 0) {
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002334 SetPageError(page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002335 mapping_set_error(page->mapping, rc);
2336 } else {
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002337 SetPageUptodate(page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002338 }
Linus Torvaldscb876f42006-12-23 16:19:07 -08002339 end_page_writeback(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002340 put_page(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002341 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 return rc;
2343}
2344
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002345static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2346{
2347 int rc = cifs_writepage_locked(page, wbc);
2348 unlock_page(page);
2349 return rc;
2350}
2351
Nick Piggind9414772008-09-24 11:32:59 -04002352static int cifs_write_end(struct file *file, struct address_space *mapping,
2353 loff_t pos, unsigned len, unsigned copied,
2354 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355{
Nick Piggind9414772008-09-24 11:32:59 -04002356 int rc;
2357 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002358 struct cifsFileInfo *cfile = file->private_data;
2359 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2360 __u32 pid;
2361
2362 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2363 pid = cfile->pid;
2364 else
2365 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366
Joe Perchesf96637b2013-05-04 22:12:25 -05002367 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002368 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002369
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002370 if (PageChecked(page)) {
2371 if (copied == len)
2372 SetPageUptodate(page);
2373 ClearPageChecked(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002374 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002375 SetPageUptodate(page);
2376
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002378 char *page_data;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002379 unsigned offset = pos & (PAGE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002380 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002381
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002382 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 /* this is probably better than directly calling
2384 partialpage_write since in this function the file handle is
2385 known which we might as well leverage */
2386 /* BB check if anything else missing out of ppw
2387 such as updating last write time */
2388 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002389 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002390 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002392
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002393 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002394 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002395 rc = copied;
2396 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002397 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 }
2399
Nick Piggind9414772008-09-24 11:32:59 -04002400 if (rc > 0) {
2401 spin_lock(&inode->i_lock);
2402 if (pos > inode->i_size)
2403 i_size_write(inode, pos);
2404 spin_unlock(&inode->i_lock);
2405 }
2406
2407 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002408 put_page(page);
Nick Piggind9414772008-09-24 11:32:59 -04002409
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 return rc;
2411}
2412
Josef Bacik02c24a82011-07-16 20:44:56 -04002413int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2414 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002416 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002418 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002419 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002420 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002421 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002422 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423
Jeff Layton3b49c9a2017-07-07 15:20:52 -04002424 rc = file_write_and_wait_range(file, start, end);
Josef Bacik02c24a82011-07-16 20:44:56 -04002425 if (rc)
2426 return rc;
Al Viro59551022016-01-22 15:40:57 -05002427 inode_lock(inode);
Josef Bacik02c24a82011-07-16 20:44:56 -04002428
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002429 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430
Al Viro35c265e2014-08-19 20:25:34 -04002431 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2432 file, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002433
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002434 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002435 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002436 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002437 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002438 rc = 0; /* don't care about it in fsync */
2439 }
2440 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002441
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002442 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002443 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2444 server = tcon->ses->server;
2445 if (server->ops->flush)
2446 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2447 else
2448 rc = -ENOSYS;
2449 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002450
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002451 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002452 inode_unlock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002453 return rc;
2454}
2455
Josef Bacik02c24a82011-07-16 20:44:56 -04002456int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002457{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002458 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002459 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002460 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002461 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002462 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002463 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002464 struct inode *inode = file->f_mapping->host;
2465
Jeff Layton3b49c9a2017-07-07 15:20:52 -04002466 rc = file_write_and_wait_range(file, start, end);
Josef Bacik02c24a82011-07-16 20:44:56 -04002467 if (rc)
2468 return rc;
Al Viro59551022016-01-22 15:40:57 -05002469 inode_lock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002470
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002471 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002472
Al Viro35c265e2014-08-19 20:25:34 -04002473 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2474 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002475
2476 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002477 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2478 server = tcon->ses->server;
2479 if (server->ops->flush)
2480 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2481 else
2482 rc = -ENOSYS;
2483 }
Steve Frenchb298f222009-02-21 21:17:43 +00002484
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002485 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002486 inode_unlock(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 return rc;
2488}
2489
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490/*
2491 * As file closes, flush all cached write data for this inode checking
2492 * for write behind errors.
2493 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002494int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495{
Al Viro496ad9a2013-01-23 17:07:38 -05002496 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 int rc = 0;
2498
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002499 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002500 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002501
Joe Perchesf96637b2013-05-04 22:12:25 -05002502 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503
2504 return rc;
2505}
2506
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002507static int
2508cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2509{
2510 int rc = 0;
2511 unsigned long i;
2512
2513 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002514 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002515 if (!pages[i]) {
2516 /*
2517 * save number of pages we have already allocated and
2518 * return with ENOMEM error
2519 */
2520 num_pages = i;
2521 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002522 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002523 }
2524 }
2525
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002526 if (rc) {
2527 for (i = 0; i < num_pages; i++)
2528 put_page(pages[i]);
2529 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002530 return rc;
2531}
2532
2533static inline
2534size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2535{
2536 size_t num_pages;
2537 size_t clen;
2538
2539 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002540 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002541
2542 if (cur_len)
2543 *cur_len = clen;
2544
2545 return num_pages;
2546}
2547
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002548static void
Steve French4a5c80d2014-02-07 20:45:12 -06002549cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002550{
2551 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002552 struct cifs_writedata *wdata = container_of(refcount,
2553 struct cifs_writedata, refcount);
2554
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002555 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
Steve French4a5c80d2014-02-07 20:45:12 -06002556 for (i = 0; i < wdata->nr_pages; i++)
2557 put_page(wdata->pages[i]);
2558 cifs_writedata_release(refcount);
2559}
2560
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002561static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2562
Steve French4a5c80d2014-02-07 20:45:12 -06002563static void
2564cifs_uncached_writev_complete(struct work_struct *work)
2565{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002566 struct cifs_writedata *wdata = container_of(work,
2567 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002568 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002569 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2570
2571 spin_lock(&inode->i_lock);
2572 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2573 if (cifsi->server_eof > inode->i_size)
2574 i_size_write(inode, cifsi->server_eof);
2575 spin_unlock(&inode->i_lock);
2576
2577 complete(&wdata->done);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002578 collect_uncached_write_data(wdata->ctx);
2579 /* the below call can possibly free the last ref to aio ctx */
Steve French4a5c80d2014-02-07 20:45:12 -06002580 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002581}
2582
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002583static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002584wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2585 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002586{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002587 size_t save_len, copied, bytes, cur_len = *len;
2588 unsigned long i, nr_pages = *num_pages;
2589
2590 save_len = cur_len;
2591 for (i = 0; i < nr_pages; i++) {
2592 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2593 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2594 cur_len -= copied;
2595 /*
2596 * If we didn't copy as much as we expected, then that
2597 * may mean we trod into an unmapped area. Stop copying
2598 * at that point. On the next pass through the big
2599 * loop, we'll likely end up getting a zero-length
2600 * write and bailing out of it.
2601 */
2602 if (copied < bytes)
2603 break;
2604 }
2605 cur_len = save_len - cur_len;
2606 *len = cur_len;
2607
2608 /*
2609 * If we have no data to send, then that probably means that
2610 * the copy above failed altogether. That's most likely because
2611 * the address in the iovec was bogus. Return -EFAULT and let
2612 * the caller free anything we allocated and bail out.
2613 */
2614 if (!cur_len)
2615 return -EFAULT;
2616
2617 /*
2618 * i + 1 now represents the number of pages we actually used in
2619 * the copy phase above.
2620 */
2621 *num_pages = i + 1;
2622 return 0;
2623}
2624
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002625static int
Long Li8c5f9c12018-10-31 22:13:10 +00002626cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
2627 struct cifs_aio_ctx *ctx)
2628{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002629 unsigned int wsize;
2630 struct cifs_credits credits;
Long Li8c5f9c12018-10-31 22:13:10 +00002631 int rc;
2632 struct TCP_Server_Info *server =
2633 tlink_tcon(wdata->cfile->tlink)->ses->server;
2634
2635 /*
Long Li6ac79292018-12-06 04:51:06 +00002636 * Wait for credits to resend this wdata.
Long Li8c5f9c12018-10-31 22:13:10 +00002637 * Note: we are attempting to resend the whole wdata not in segments
2638 */
2639 do {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002640 rc = server->ops->wait_mtu_credits(server, wdata->bytes, &wsize,
2641 &credits);
Long Li8c5f9c12018-10-31 22:13:10 +00002642
2643 if (rc)
Long Li6ac79292018-12-06 04:51:06 +00002644 goto out;
Long Li8c5f9c12018-10-31 22:13:10 +00002645
2646 if (wsize < wdata->bytes) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002647 add_credits_and_wake_if(server, &credits, 0);
Long Li8c5f9c12018-10-31 22:13:10 +00002648 msleep(1000);
Long Li8c5f9c12018-10-31 22:13:10 +00002649 }
Long Li6ac79292018-12-06 04:51:06 +00002650 } while (wsize < wdata->bytes);
Long Li8c5f9c12018-10-31 22:13:10 +00002651
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002652 wdata->credits = credits;
Long Li8c5f9c12018-10-31 22:13:10 +00002653 rc = -EAGAIN;
2654 while (rc == -EAGAIN) {
2655 rc = 0;
2656 if (wdata->cfile->invalidHandle)
2657 rc = cifs_reopen_file(wdata->cfile, false);
2658 if (!rc)
2659 rc = server->ops->async_writev(wdata,
2660 cifs_uncached_writedata_release);
2661 }
2662
2663 if (!rc) {
2664 list_add_tail(&wdata->list, wdata_list);
2665 return 0;
2666 }
2667
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002668 add_credits_and_wake_if(server, &wdata->credits, 0);
Long Li8c5f9c12018-10-31 22:13:10 +00002669out:
2670 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2671
2672 return rc;
2673}
2674
2675static int
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002676cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2677 struct cifsFileInfo *open_file,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002678 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
2679 struct cifs_aio_ctx *ctx)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002680{
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002681 int rc = 0;
2682 size_t cur_len;
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002683 unsigned long nr_pages, num_pages, i;
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002684 struct cifs_writedata *wdata;
Al Virofc56b982016-09-21 18:18:23 -04002685 struct iov_iter saved_from = *from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002686 loff_t saved_offset = offset;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002687 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002688 struct TCP_Server_Info *server;
Long Li8c5f9c12018-10-31 22:13:10 +00002689 struct page **pagevec;
2690 size_t start;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002691 unsigned int xid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002692
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002693 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2694 pid = open_file->pid;
2695 else
2696 pid = current->tgid;
2697
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002698 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002699 xid = get_xid();
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002700
2701 do {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002702 unsigned int wsize;
2703 struct cifs_credits credits_on_stack;
2704 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002705
Pavel Shilovsky3e952992019-01-25 11:59:01 -08002706 if (open_file->invalidHandle) {
2707 rc = cifs_reopen_file(open_file, false);
2708 if (rc == -EAGAIN)
2709 continue;
2710 else if (rc)
2711 break;
2712 }
2713
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002714 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002715 &wsize, credits);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002716 if (rc)
2717 break;
2718
Long Lib6bc8a72018-12-16 23:17:04 +00002719 cur_len = min_t(const size_t, len, wsize);
2720
Long Li8c5f9c12018-10-31 22:13:10 +00002721 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05002722 ssize_t result;
2723
2724 result = iov_iter_get_pages_alloc(
Long Lib6bc8a72018-12-16 23:17:04 +00002725 from, &pagevec, cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05002726 if (result < 0) {
Long Li8c5f9c12018-10-31 22:13:10 +00002727 cifs_dbg(VFS,
2728 "direct_writev couldn't get user pages "
2729 "(rc=%zd) iter type %d iov_offset %zd "
2730 "count %zd\n",
Steve Frenchb98e26d2018-11-01 10:54:32 -05002731 result, from->type,
Long Li8c5f9c12018-10-31 22:13:10 +00002732 from->iov_offset, from->count);
2733 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00002734
2735 rc = result;
2736 add_credits_and_wake_if(server, credits, 0);
Long Li8c5f9c12018-10-31 22:13:10 +00002737 break;
2738 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05002739 cur_len = (size_t)result;
Long Li8c5f9c12018-10-31 22:13:10 +00002740 iov_iter_advance(from, cur_len);
2741
2742 nr_pages =
2743 (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
2744
2745 wdata = cifs_writedata_direct_alloc(pagevec,
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002746 cifs_uncached_writev_complete);
Long Li8c5f9c12018-10-31 22:13:10 +00002747 if (!wdata) {
2748 rc = -ENOMEM;
2749 add_credits_and_wake_if(server, credits, 0);
2750 break;
2751 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002752
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002753
Long Li8c5f9c12018-10-31 22:13:10 +00002754 wdata->page_offset = start;
2755 wdata->tailsz =
2756 nr_pages > 1 ?
2757 cur_len - (PAGE_SIZE - start) -
2758 (nr_pages - 2) * PAGE_SIZE :
2759 cur_len;
2760 } else {
2761 nr_pages = get_numpages(wsize, len, &cur_len);
2762 wdata = cifs_writedata_alloc(nr_pages,
2763 cifs_uncached_writev_complete);
2764 if (!wdata) {
2765 rc = -ENOMEM;
2766 add_credits_and_wake_if(server, credits, 0);
2767 break;
2768 }
Jeff Layton5d81de82014-02-14 07:20:35 -05002769
Long Li8c5f9c12018-10-31 22:13:10 +00002770 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2771 if (rc) {
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08002772 kvfree(wdata->pages);
Long Li8c5f9c12018-10-31 22:13:10 +00002773 kfree(wdata);
2774 add_credits_and_wake_if(server, credits, 0);
2775 break;
2776 }
2777
2778 num_pages = nr_pages;
2779 rc = wdata_fill_from_iovec(
2780 wdata, from, &cur_len, &num_pages);
2781 if (rc) {
2782 for (i = 0; i < nr_pages; i++)
2783 put_page(wdata->pages[i]);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08002784 kvfree(wdata->pages);
Long Li8c5f9c12018-10-31 22:13:10 +00002785 kfree(wdata);
2786 add_credits_and_wake_if(server, credits, 0);
2787 break;
2788 }
2789
2790 /*
2791 * Bring nr_pages down to the number of pages we
2792 * actually used, and free any pages that we didn't use.
2793 */
2794 for ( ; nr_pages > num_pages; nr_pages--)
2795 put_page(wdata->pages[nr_pages - 1]);
2796
2797 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2798 }
Jeff Layton5d81de82014-02-14 07:20:35 -05002799
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002800 wdata->sync_mode = WB_SYNC_ALL;
2801 wdata->nr_pages = nr_pages;
2802 wdata->offset = (__u64)offset;
2803 wdata->cfile = cifsFileInfo_get(open_file);
2804 wdata->pid = pid;
2805 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002806 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002807 wdata->credits = credits_on_stack;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002808 wdata->ctx = ctx;
2809 kref_get(&ctx->refcount);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002810
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002811 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2812
2813 if (!rc) {
2814 if (wdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08002815 rc = -EAGAIN;
2816 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002817 rc = server->ops->async_writev(wdata,
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002818 cifs_uncached_writedata_release);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002819 }
2820
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002821 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002822 add_credits_and_wake_if(server, &wdata->credits, 0);
Steve French4a5c80d2014-02-07 20:45:12 -06002823 kref_put(&wdata->refcount,
2824 cifs_uncached_writedata_release);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002825 if (rc == -EAGAIN) {
Al Virofc56b982016-09-21 18:18:23 -04002826 *from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002827 iov_iter_advance(from, offset - saved_offset);
2828 continue;
2829 }
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002830 break;
2831 }
2832
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002833 list_add_tail(&wdata->list, wdata_list);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002834 offset += cur_len;
2835 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002836 } while (len > 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002837
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002838 free_xid(xid);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002839 return rc;
2840}
2841
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002842static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
2843{
2844 struct cifs_writedata *wdata, *tmp;
2845 struct cifs_tcon *tcon;
2846 struct cifs_sb_info *cifs_sb;
2847 struct dentry *dentry = ctx->cfile->dentry;
2848 unsigned int i;
2849 int rc;
2850
2851 tcon = tlink_tcon(ctx->cfile->tlink);
2852 cifs_sb = CIFS_SB(dentry->d_sb);
2853
2854 mutex_lock(&ctx->aio_mutex);
2855
2856 if (list_empty(&ctx->list)) {
2857 mutex_unlock(&ctx->aio_mutex);
2858 return;
2859 }
2860
2861 rc = ctx->rc;
2862 /*
2863 * Wait for and collect replies for any successful sends in order of
2864 * increasing offset. Once an error is hit, then return without waiting
2865 * for any more replies.
2866 */
2867restart_loop:
2868 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
2869 if (!rc) {
2870 if (!try_wait_for_completion(&wdata->done)) {
2871 mutex_unlock(&ctx->aio_mutex);
2872 return;
2873 }
2874
2875 if (wdata->result)
2876 rc = wdata->result;
2877 else
2878 ctx->total_len += wdata->bytes;
2879
2880 /* resend call if it's a retryable error */
2881 if (rc == -EAGAIN) {
2882 struct list_head tmp_list;
2883 struct iov_iter tmp_from = ctx->iter;
2884
2885 INIT_LIST_HEAD(&tmp_list);
2886 list_del_init(&wdata->list);
2887
Long Li8c5f9c12018-10-31 22:13:10 +00002888 if (ctx->direct_io)
2889 rc = cifs_resend_wdata(
2890 wdata, &tmp_list, ctx);
2891 else {
2892 iov_iter_advance(&tmp_from,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002893 wdata->offset - ctx->pos);
2894
Long Li8c5f9c12018-10-31 22:13:10 +00002895 rc = cifs_write_from_iter(wdata->offset,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002896 wdata->bytes, &tmp_from,
2897 ctx->cfile, cifs_sb, &tmp_list,
2898 ctx);
Long Li8c5f9c12018-10-31 22:13:10 +00002899 }
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002900
2901 list_splice(&tmp_list, &ctx->list);
2902
2903 kref_put(&wdata->refcount,
2904 cifs_uncached_writedata_release);
2905 goto restart_loop;
2906 }
2907 }
2908 list_del_init(&wdata->list);
2909 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2910 }
2911
Long Li8c5f9c12018-10-31 22:13:10 +00002912 if (!ctx->direct_io)
2913 for (i = 0; i < ctx->npages; i++)
2914 put_page(ctx->bv[i].bv_page);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002915
2916 cifs_stats_bytes_written(tcon, ctx->total_len);
2917 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
2918
2919 ctx->rc = (rc == 0) ? ctx->total_len : rc;
2920
2921 mutex_unlock(&ctx->aio_mutex);
2922
2923 if (ctx->iocb && ctx->iocb->ki_complete)
2924 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
2925 else
2926 complete(&ctx->done);
2927}
2928
Long Li8c5f9c12018-10-31 22:13:10 +00002929static ssize_t __cifs_writev(
2930 struct kiocb *iocb, struct iov_iter *from, bool direct)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002931{
Al Viroe9d15932015-04-06 22:44:11 -04002932 struct file *file = iocb->ki_filp;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002933 ssize_t total_written = 0;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002934 struct cifsFileInfo *cfile;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002935 struct cifs_tcon *tcon;
2936 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002937 struct cifs_aio_ctx *ctx;
Al Virofc56b982016-09-21 18:18:23 -04002938 struct iov_iter saved_from = *from;
Long Li8c5f9c12018-10-31 22:13:10 +00002939 size_t len = iov_iter_count(from);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002940 int rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002941
Al Viroe9d15932015-04-06 22:44:11 -04002942 /*
Long Li8c5f9c12018-10-31 22:13:10 +00002943 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
2944 * In this case, fall back to non-direct write function.
2945 * this could be improved by getting pages directly in ITER_KVEC
Al Viroe9d15932015-04-06 22:44:11 -04002946 */
Long Li8c5f9c12018-10-31 22:13:10 +00002947 if (direct && from->type & ITER_KVEC) {
2948 cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
2949 direct = false;
2950 }
Al Viroe9d15932015-04-06 22:44:11 -04002951
Al Viro3309dd02015-04-09 12:55:47 -04002952 rc = generic_write_checks(iocb, from);
2953 if (rc <= 0)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002954 return rc;
2955
Al Viro7119e222014-10-22 00:25:12 -04002956 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002957 cfile = file->private_data;
2958 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002959
2960 if (!tcon->ses->server->ops->async_writev)
2961 return -ENOSYS;
2962
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002963 ctx = cifs_aio_ctx_alloc();
2964 if (!ctx)
2965 return -ENOMEM;
2966
2967 ctx->cfile = cifsFileInfo_get(cfile);
2968
2969 if (!is_sync_kiocb(iocb))
2970 ctx->iocb = iocb;
2971
2972 ctx->pos = iocb->ki_pos;
2973
Long Li8c5f9c12018-10-31 22:13:10 +00002974 if (direct) {
2975 ctx->direct_io = true;
2976 ctx->iter = *from;
2977 ctx->len = len;
2978 } else {
2979 rc = setup_aio_ctx_iter(ctx, from, WRITE);
2980 if (rc) {
2981 kref_put(&ctx->refcount, cifs_aio_ctx_release);
2982 return rc;
2983 }
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002984 }
2985
2986 /* grab a lock here due to read response handlers can access ctx */
2987 mutex_lock(&ctx->aio_mutex);
2988
2989 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
2990 cfile, cifs_sb, &ctx->list, ctx);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002991
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002992 /*
2993 * If at least one write was successfully sent, then discard any rc
2994 * value from the later writes. If the other write succeeds, then
2995 * we'll end up returning whatever was written. If it fails, then
2996 * we'll get a new rc value from that.
2997 */
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002998 if (!list_empty(&ctx->list))
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002999 rc = 0;
3000
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003001 mutex_unlock(&ctx->aio_mutex);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003002
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003003 if (rc) {
3004 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3005 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003006 }
3007
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003008 if (!is_sync_kiocb(iocb)) {
3009 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3010 return -EIOCBQUEUED;
3011 }
3012
3013 rc = wait_for_completion_killable(&ctx->done);
3014 if (rc) {
3015 mutex_lock(&ctx->aio_mutex);
3016 ctx->rc = rc = -EINTR;
3017 total_written = ctx->total_len;
3018 mutex_unlock(&ctx->aio_mutex);
3019 } else {
3020 rc = ctx->rc;
3021 total_written = ctx->total_len;
3022 }
3023
3024 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3025
Al Viroe9d15932015-04-06 22:44:11 -04003026 if (unlikely(!total_written))
3027 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003028
Al Viroe9d15932015-04-06 22:44:11 -04003029 iocb->ki_pos += total_written;
Al Viroe9d15932015-04-06 22:44:11 -04003030 return total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003031}
3032
Long Li8c5f9c12018-10-31 22:13:10 +00003033ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3034{
3035 return __cifs_writev(iocb, from, true);
3036}
3037
3038ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3039{
3040 return __cifs_writev(iocb, from, false);
3041}
3042
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003043static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04003044cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003045{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003046 struct file *file = iocb->ki_filp;
3047 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
3048 struct inode *inode = file->f_mapping->host;
3049 struct cifsInodeInfo *cinode = CIFS_I(inode);
3050 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04003051 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003052
Rabin Vincent966681c2017-06-29 16:01:42 +02003053 inode_lock(inode);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003054 /*
3055 * We need to hold the sem to be sure nobody modifies lock list
3056 * with a brlock that prevents writing.
3057 */
3058 down_read(&cinode->lock_sem);
Al Viro5f380c72015-04-07 11:28:12 -04003059
Al Viro3309dd02015-04-09 12:55:47 -04003060 rc = generic_write_checks(iocb, from);
3061 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04003062 goto out;
3063
Al Viro5f380c72015-04-07 11:28:12 -04003064 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003065 server->vals->exclusive_lock_type, 0,
3066 NULL, CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04003067 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04003068 else
3069 rc = -EACCES;
3070out:
Rabin Vincent966681c2017-06-29 16:01:42 +02003071 up_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05003072 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04003073
Christoph Hellwige2592212016-04-07 08:52:01 -07003074 if (rc > 0)
3075 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003076 return rc;
3077}
3078
3079ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04003080cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003081{
Al Viro496ad9a2013-01-23 17:07:38 -05003082 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003083 struct cifsInodeInfo *cinode = CIFS_I(inode);
3084 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3085 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3086 iocb->ki_filp->private_data;
3087 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003088 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003089
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003090 written = cifs_get_writer(cinode);
3091 if (written)
3092 return written;
3093
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003094 if (CIFS_CACHE_WRITE(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003095 if (cap_unix(tcon->ses) &&
3096 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003097 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
Al Viro3dae8752014-04-03 12:05:17 -04003098 written = generic_file_write_iter(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003099 goto out;
3100 }
Al Viro3dae8752014-04-03 12:05:17 -04003101 written = cifs_writev(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003102 goto out;
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04003103 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003104 /*
3105 * For non-oplocked files in strict cache mode we need to write the data
3106 * to the server exactly from the pos to pos+len-1 rather than flush all
3107 * affected pages because it may cause a error with mandatory locks on
3108 * these pages but not on the region from pos to ppos+len-1.
3109 */
Al Viro3dae8752014-04-03 12:05:17 -04003110 written = cifs_user_writev(iocb, from);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003111 if (CIFS_CACHE_READ(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003112 /*
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003113 * We have read level caching and we have just sent a write
3114 * request to the server thus making data in the cache stale.
3115 * Zap the cache and set oplock/lease level to NONE to avoid
3116 * reading stale data from the cache. All subsequent read
3117 * operations will read new data from the server.
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003118 */
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003119 cifs_zap_mapping(inode);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003120 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05003121 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003122 cinode->oplock = 0;
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003123 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003124out:
3125 cifs_put_writer(cinode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003126 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003127}
3128
Jeff Layton0471ca32012-05-16 07:13:16 -04003129static struct cifs_readdata *
Long Lif9f5aca2018-05-30 12:47:54 -07003130cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04003131{
3132 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003133
Long Lif9f5aca2018-05-30 12:47:54 -07003134 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04003135 if (rdata != NULL) {
Long Lif9f5aca2018-05-30 12:47:54 -07003136 rdata->pages = pages;
Jeff Layton6993f742012-05-16 07:13:17 -04003137 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003138 INIT_LIST_HEAD(&rdata->list);
3139 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04003140 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04003141 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003142
Jeff Layton0471ca32012-05-16 07:13:16 -04003143 return rdata;
3144}
3145
Long Lif9f5aca2018-05-30 12:47:54 -07003146static struct cifs_readdata *
3147cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3148{
3149 struct page **pages =
Kees Cook6396bb22018-06-12 14:03:40 -07003150 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
Long Lif9f5aca2018-05-30 12:47:54 -07003151 struct cifs_readdata *ret = NULL;
3152
3153 if (pages) {
3154 ret = cifs_readdata_direct_alloc(pages, complete);
3155 if (!ret)
3156 kfree(pages);
3157 }
3158
3159 return ret;
3160}
3161
Jeff Layton6993f742012-05-16 07:13:17 -04003162void
3163cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04003164{
Jeff Layton6993f742012-05-16 07:13:17 -04003165 struct cifs_readdata *rdata = container_of(refcount,
3166 struct cifs_readdata, refcount);
Long Libd3dcc62017-11-22 17:38:47 -07003167#ifdef CONFIG_CIFS_SMB_DIRECT
3168 if (rdata->mr) {
3169 smbd_deregister_mr(rdata->mr);
3170 rdata->mr = NULL;
3171 }
3172#endif
Jeff Layton6993f742012-05-16 07:13:17 -04003173 if (rdata->cfile)
3174 cifsFileInfo_put(rdata->cfile);
3175
Long Lif9f5aca2018-05-30 12:47:54 -07003176 kvfree(rdata->pages);
Jeff Layton0471ca32012-05-16 07:13:16 -04003177 kfree(rdata);
3178}
3179
Jeff Layton2a1bb132012-05-16 07:13:17 -04003180static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003181cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04003182{
3183 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003184 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04003185 unsigned int i;
3186
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003187 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04003188 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3189 if (!page) {
3190 rc = -ENOMEM;
3191 break;
3192 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003193 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04003194 }
3195
3196 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003197 for (i = 0; i < nr_pages; i++) {
3198 put_page(rdata->pages[i]);
3199 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04003200 }
3201 }
3202 return rc;
3203}
3204
3205static void
3206cifs_uncached_readdata_release(struct kref *refcount)
3207{
Jeff Layton1c892542012-05-16 07:13:17 -04003208 struct cifs_readdata *rdata = container_of(refcount,
3209 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003210 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003211
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003212 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003213 for (i = 0; i < rdata->nr_pages; i++) {
3214 put_page(rdata->pages[i]);
Jeff Layton1c892542012-05-16 07:13:17 -04003215 }
3216 cifs_readdata_release(refcount);
3217}
3218
Jeff Layton1c892542012-05-16 07:13:17 -04003219/**
3220 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3221 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05003222 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04003223 *
3224 * This function copies data from a list of pages in a readdata response into
3225 * an array of iovecs. It will first calculate where the data should go
3226 * based on the info in the readdata and then copy the data into that spot.
3227 */
Al Viro7f25bba2014-02-04 14:07:43 -05003228static int
3229cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04003230{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04003231 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003232 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003233
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003234 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003235 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02003236 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003237 size_t written;
3238
David Howells00e23702018-10-22 13:07:28 +01003239 if (unlikely(iov_iter_is_pipe(iter))) {
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003240 void *addr = kmap_atomic(page);
3241
3242 written = copy_to_iter(addr, copy, iter);
3243 kunmap_atomic(addr);
3244 } else
3245 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05003246 remaining -= written;
3247 if (written < copy && iov_iter_count(iter) > 0)
3248 break;
Jeff Layton1c892542012-05-16 07:13:17 -04003249 }
Al Viro7f25bba2014-02-04 14:07:43 -05003250 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04003251}
3252
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003253static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3254
Jeff Layton1c892542012-05-16 07:13:17 -04003255static void
3256cifs_uncached_readv_complete(struct work_struct *work)
3257{
3258 struct cifs_readdata *rdata = container_of(work,
3259 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04003260
3261 complete(&rdata->done);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003262 collect_uncached_read_data(rdata->ctx);
3263 /* the below call can possibly free the last ref to aio ctx */
Jeff Layton1c892542012-05-16 07:13:17 -04003264 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3265}
3266
3267static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003268uncached_fill_pages(struct TCP_Server_Info *server,
3269 struct cifs_readdata *rdata, struct iov_iter *iter,
3270 unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04003271{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003272 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003273 unsigned int i;
3274 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003275 unsigned int page_offset = rdata->page_offset;
Jeff Layton1c892542012-05-16 07:13:17 -04003276
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003277 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07003278 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003279 for (i = 0; i < nr_pages; i++) {
3280 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05003281 size_t n;
Long Li1dbe3462018-05-30 12:47:55 -07003282 unsigned int segment_size = rdata->pagesz;
3283
3284 if (i == 0)
3285 segment_size -= page_offset;
3286 else
3287 page_offset = 0;
3288
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003289
Al Viro71335662016-01-09 19:54:50 -05003290 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04003291 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003292 rdata->pages[i] = NULL;
3293 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04003294 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07003295 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003296 }
Long Li1dbe3462018-05-30 12:47:55 -07003297
Al Viro71335662016-01-09 19:54:50 -05003298 n = len;
Long Li1dbe3462018-05-30 12:47:55 -07003299 if (len >= segment_size)
Al Viro71335662016-01-09 19:54:50 -05003300 /* enough data to fill the page */
Long Li1dbe3462018-05-30 12:47:55 -07003301 n = segment_size;
3302 else
Al Viro71335662016-01-09 19:54:50 -05003303 rdata->tailsz = len;
Long Li1dbe3462018-05-30 12:47:55 -07003304 len -= n;
3305
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003306 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003307 result = copy_page_from_iter(
3308 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003309#ifdef CONFIG_CIFS_SMB_DIRECT
3310 else if (rdata->mr)
3311 result = n;
3312#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003313 else
Long Li1dbe3462018-05-30 12:47:55 -07003314 result = cifs_read_page_from_socket(
3315 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003316 if (result < 0)
3317 break;
3318
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003319 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003320 }
3321
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003322 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3323 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003324}
3325
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003326static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003327cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3328 struct cifs_readdata *rdata, unsigned int len)
3329{
3330 return uncached_fill_pages(server, rdata, NULL, len);
3331}
3332
3333static int
3334cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3335 struct cifs_readdata *rdata,
3336 struct iov_iter *iter)
3337{
3338 return uncached_fill_pages(server, rdata, iter, iter->count);
3339}
3340
Long Li6e6e2b82018-10-31 22:13:09 +00003341static int cifs_resend_rdata(struct cifs_readdata *rdata,
3342 struct list_head *rdata_list,
3343 struct cifs_aio_ctx *ctx)
3344{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003345 unsigned int rsize;
3346 struct cifs_credits credits;
Long Li6e6e2b82018-10-31 22:13:09 +00003347 int rc;
3348 struct TCP_Server_Info *server =
3349 tlink_tcon(rdata->cfile->tlink)->ses->server;
3350
3351 /*
Long Li6ac79292018-12-06 04:51:06 +00003352 * Wait for credits to resend this rdata.
Long Li6e6e2b82018-10-31 22:13:09 +00003353 * Note: we are attempting to resend the whole rdata not in segments
3354 */
3355 do {
3356 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
3357 &rsize, &credits);
3358
3359 if (rc)
Long Li6ac79292018-12-06 04:51:06 +00003360 goto out;
Long Li6e6e2b82018-10-31 22:13:09 +00003361
3362 if (rsize < rdata->bytes) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003363 add_credits_and_wake_if(server, &credits, 0);
Long Li6e6e2b82018-10-31 22:13:09 +00003364 msleep(1000);
Long Li6e6e2b82018-10-31 22:13:09 +00003365 }
Long Li6ac79292018-12-06 04:51:06 +00003366 } while (rsize < rdata->bytes);
Long Li6e6e2b82018-10-31 22:13:09 +00003367
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003368 rdata->credits = credits;
Long Li6e6e2b82018-10-31 22:13:09 +00003369 rc = -EAGAIN;
3370 while (rc == -EAGAIN) {
3371 rc = 0;
3372 if (rdata->cfile->invalidHandle)
3373 rc = cifs_reopen_file(rdata->cfile, true);
3374 if (!rc)
3375 rc = server->ops->async_readv(rdata);
3376 }
3377
3378 if (!rc) {
3379 /* Add to aio pending list */
3380 list_add_tail(&rdata->list, rdata_list);
3381 return 0;
3382 }
3383
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003384 add_credits_and_wake_if(server, &rdata->credits, 0);
Long Li6e6e2b82018-10-31 22:13:09 +00003385out:
3386 kref_put(&rdata->refcount,
3387 cifs_uncached_readdata_release);
3388
3389 return rc;
3390}
3391
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003392static int
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003393cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003394 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3395 struct cifs_aio_ctx *ctx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003397 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003398 unsigned int npages, rsize;
3399 struct cifs_credits credits_on_stack;
3400 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003401 size_t cur_len;
3402 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003403 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003404 struct TCP_Server_Info *server;
Long Li6e6e2b82018-10-31 22:13:09 +00003405 struct page **pagevec;
3406 size_t start;
3407 struct iov_iter direct_iov = ctx->iter;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003408
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003409 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07003410
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003411 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3412 pid = open_file->pid;
3413 else
3414 pid = current->tgid;
3415
Long Li6e6e2b82018-10-31 22:13:09 +00003416 if (ctx->direct_io)
3417 iov_iter_advance(&direct_iov, offset - ctx->pos);
3418
Jeff Layton1c892542012-05-16 07:13:17 -04003419 do {
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003420 if (open_file->invalidHandle) {
3421 rc = cifs_reopen_file(open_file, true);
3422 if (rc == -EAGAIN)
3423 continue;
3424 else if (rc)
3425 break;
3426 }
3427
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003428 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003429 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003430 if (rc)
3431 break;
3432
3433 cur_len = min_t(const size_t, len, rsize);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003434
Long Li6e6e2b82018-10-31 22:13:09 +00003435 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05003436 ssize_t result;
Long Li6e6e2b82018-10-31 22:13:09 +00003437
Steve Frenchb98e26d2018-11-01 10:54:32 -05003438 result = iov_iter_get_pages_alloc(
Long Li6e6e2b82018-10-31 22:13:09 +00003439 &direct_iov, &pagevec,
3440 cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05003441 if (result < 0) {
Long Li6e6e2b82018-10-31 22:13:09 +00003442 cifs_dbg(VFS,
Long Li54e94ff2018-12-16 22:41:07 +00003443 "couldn't get user pages (rc=%zd)"
Long Li6e6e2b82018-10-31 22:13:09 +00003444 " iter type %d"
3445 " iov_offset %zd count %zd\n",
Steve Frenchb98e26d2018-11-01 10:54:32 -05003446 result, direct_iov.type,
Long Li6e6e2b82018-10-31 22:13:09 +00003447 direct_iov.iov_offset,
3448 direct_iov.count);
3449 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00003450
3451 rc = result;
3452 add_credits_and_wake_if(server, credits, 0);
Long Li6e6e2b82018-10-31 22:13:09 +00003453 break;
3454 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05003455 cur_len = (size_t)result;
Long Li6e6e2b82018-10-31 22:13:09 +00003456 iov_iter_advance(&direct_iov, cur_len);
3457
3458 rdata = cifs_readdata_direct_alloc(
3459 pagevec, cifs_uncached_readv_complete);
3460 if (!rdata) {
3461 add_credits_and_wake_if(server, credits, 0);
3462 rc = -ENOMEM;
3463 break;
3464 }
3465
3466 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
3467 rdata->page_offset = start;
3468 rdata->tailsz = npages > 1 ?
3469 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
3470 cur_len;
3471
3472 } else {
3473
3474 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3475 /* allocate a readdata struct */
3476 rdata = cifs_readdata_alloc(npages,
Jeff Layton1c892542012-05-16 07:13:17 -04003477 cifs_uncached_readv_complete);
Long Li6e6e2b82018-10-31 22:13:09 +00003478 if (!rdata) {
3479 add_credits_and_wake_if(server, credits, 0);
3480 rc = -ENOMEM;
3481 break;
3482 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003483
Long Li6e6e2b82018-10-31 22:13:09 +00003484 rc = cifs_read_allocate_pages(rdata, npages);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08003485 if (rc) {
3486 kvfree(rdata->pages);
3487 kfree(rdata);
3488 add_credits_and_wake_if(server, credits, 0);
3489 break;
3490 }
Long Li6e6e2b82018-10-31 22:13:09 +00003491
3492 rdata->tailsz = PAGE_SIZE;
3493 }
Jeff Layton1c892542012-05-16 07:13:17 -04003494
3495 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003496 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003497 rdata->offset = offset;
3498 rdata->bytes = cur_len;
3499 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003500 rdata->pagesz = PAGE_SIZE;
3501 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003502 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003503 rdata->credits = credits_on_stack;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003504 rdata->ctx = ctx;
3505 kref_get(&ctx->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003506
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003507 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3508
3509 if (!rc) {
3510 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003511 rc = -EAGAIN;
3512 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003513 rc = server->ops->async_readv(rdata);
3514 }
3515
Jeff Layton1c892542012-05-16 07:13:17 -04003516 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003517 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003518 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003519 cifs_uncached_readdata_release);
3520 if (rc == -EAGAIN) {
3521 iov_iter_revert(&direct_iov, cur_len);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003522 continue;
Long Li6e6e2b82018-10-31 22:13:09 +00003523 }
Jeff Layton1c892542012-05-16 07:13:17 -04003524 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525 }
Jeff Layton1c892542012-05-16 07:13:17 -04003526
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003527 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003528 offset += cur_len;
3529 len -= cur_len;
3530 } while (len > 0);
3531
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003532 return rc;
3533}
3534
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003535static void
3536collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003537{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003538 struct cifs_readdata *rdata, *tmp;
3539 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003540 struct cifs_sb_info *cifs_sb;
3541 struct cifs_tcon *tcon;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003542 unsigned int i;
3543 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003544
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003545 tcon = tlink_tcon(ctx->cfile->tlink);
3546 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003547
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003548 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003549
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003550 if (list_empty(&ctx->list)) {
3551 mutex_unlock(&ctx->aio_mutex);
3552 return;
3553 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003554
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003555 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003556 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003557again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003558 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003559 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003560 if (!try_wait_for_completion(&rdata->done)) {
3561 mutex_unlock(&ctx->aio_mutex);
3562 return;
3563 }
3564
3565 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003566 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003567 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003568 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003569
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003570 list_del_init(&rdata->list);
3571 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003572
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003573 /*
3574 * Got a part of data and then reconnect has
3575 * happened -- fill the buffer and continue
3576 * reading.
3577 */
3578 if (got_bytes && got_bytes < rdata->bytes) {
Long Li6e6e2b82018-10-31 22:13:09 +00003579 rc = 0;
3580 if (!ctx->direct_io)
3581 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003582 if (rc) {
3583 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003584 cifs_uncached_readdata_release);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003585 continue;
3586 }
3587 }
3588
Long Li6e6e2b82018-10-31 22:13:09 +00003589 if (ctx->direct_io) {
3590 /*
3591 * Re-use rdata as this is a
3592 * direct I/O
3593 */
3594 rc = cifs_resend_rdata(
3595 rdata,
3596 &tmp_list, ctx);
3597 } else {
3598 rc = cifs_send_async_read(
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003599 rdata->offset + got_bytes,
3600 rdata->bytes - got_bytes,
3601 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003602 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003603
Long Li6e6e2b82018-10-31 22:13:09 +00003604 kref_put(&rdata->refcount,
3605 cifs_uncached_readdata_release);
3606 }
3607
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003608 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003609
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003610 goto again;
3611 } else if (rdata->result)
3612 rc = rdata->result;
Long Li6e6e2b82018-10-31 22:13:09 +00003613 else if (!ctx->direct_io)
Jeff Layton1c892542012-05-16 07:13:17 -04003614 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003615
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003616 /* if there was a short read -- discard anything left */
3617 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3618 rc = -ENODATA;
Long Li6e6e2b82018-10-31 22:13:09 +00003619
3620 ctx->total_len += rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003621 }
3622 list_del_init(&rdata->list);
3623 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003625
Long Li6e6e2b82018-10-31 22:13:09 +00003626 if (!ctx->direct_io) {
3627 for (i = 0; i < ctx->npages; i++) {
3628 if (ctx->should_dirty)
3629 set_page_dirty(ctx->bv[i].bv_page);
3630 put_page(ctx->bv[i].bv_page);
3631 }
Al Viro7f25bba2014-02-04 14:07:43 -05003632
Long Li6e6e2b82018-10-31 22:13:09 +00003633 ctx->total_len = ctx->len - iov_iter_count(to);
3634 }
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003635
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003636 /* mask nodata case */
3637 if (rc == -ENODATA)
3638 rc = 0;
3639
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003640 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3641
3642 mutex_unlock(&ctx->aio_mutex);
3643
3644 if (ctx->iocb && ctx->iocb->ki_complete)
3645 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3646 else
3647 complete(&ctx->done);
3648}
3649
Long Li6e6e2b82018-10-31 22:13:09 +00003650static ssize_t __cifs_readv(
3651 struct kiocb *iocb, struct iov_iter *to, bool direct)
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003652{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003653 size_t len;
Long Li6e6e2b82018-10-31 22:13:09 +00003654 struct file *file = iocb->ki_filp;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003655 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003656 struct cifsFileInfo *cfile;
Long Li6e6e2b82018-10-31 22:13:09 +00003657 struct cifs_tcon *tcon;
3658 ssize_t rc, total_read = 0;
3659 loff_t offset = iocb->ki_pos;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003660 struct cifs_aio_ctx *ctx;
3661
Long Li6e6e2b82018-10-31 22:13:09 +00003662 /*
3663 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
3664 * fall back to data copy read path
3665 * this could be improved by getting pages directly in ITER_KVEC
3666 */
3667 if (direct && to->type & ITER_KVEC) {
3668 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3669 direct = false;
3670 }
3671
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003672 len = iov_iter_count(to);
3673 if (!len)
3674 return 0;
3675
3676 cifs_sb = CIFS_FILE_SB(file);
3677 cfile = file->private_data;
3678 tcon = tlink_tcon(cfile->tlink);
3679
3680 if (!tcon->ses->server->ops->async_readv)
3681 return -ENOSYS;
3682
3683 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3684 cifs_dbg(FYI, "attempting read on write only file instance\n");
3685
3686 ctx = cifs_aio_ctx_alloc();
3687 if (!ctx)
3688 return -ENOMEM;
3689
3690 ctx->cfile = cifsFileInfo_get(cfile);
3691
3692 if (!is_sync_kiocb(iocb))
3693 ctx->iocb = iocb;
3694
David Howells00e23702018-10-22 13:07:28 +01003695 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003696 ctx->should_dirty = true;
3697
Long Li6e6e2b82018-10-31 22:13:09 +00003698 if (direct) {
3699 ctx->pos = offset;
3700 ctx->direct_io = true;
3701 ctx->iter = *to;
3702 ctx->len = len;
3703 } else {
3704 rc = setup_aio_ctx_iter(ctx, to, READ);
3705 if (rc) {
3706 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3707 return rc;
3708 }
3709 len = ctx->len;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003710 }
3711
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003712 /* grab a lock here due to read response handlers can access ctx */
3713 mutex_lock(&ctx->aio_mutex);
3714
3715 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3716
3717 /* if at least one read request send succeeded, then reset rc */
3718 if (!list_empty(&ctx->list))
3719 rc = 0;
3720
3721 mutex_unlock(&ctx->aio_mutex);
3722
3723 if (rc) {
3724 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3725 return rc;
3726 }
3727
3728 if (!is_sync_kiocb(iocb)) {
3729 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3730 return -EIOCBQUEUED;
3731 }
3732
3733 rc = wait_for_completion_killable(&ctx->done);
3734 if (rc) {
3735 mutex_lock(&ctx->aio_mutex);
3736 ctx->rc = rc = -EINTR;
3737 total_read = ctx->total_len;
3738 mutex_unlock(&ctx->aio_mutex);
3739 } else {
3740 rc = ctx->rc;
3741 total_read = ctx->total_len;
3742 }
3743
3744 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3745
Al Viro0165e812014-02-04 14:19:48 -05003746 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003747 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003748 return total_read;
3749 }
3750 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003751}
3752
Long Li6e6e2b82018-10-31 22:13:09 +00003753ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3754{
3755 return __cifs_readv(iocb, to, true);
3756}
3757
3758ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3759{
3760 return __cifs_readv(iocb, to, false);
3761}
3762
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003763ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003764cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003765{
Al Viro496ad9a2013-01-23 17:07:38 -05003766 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003767 struct cifsInodeInfo *cinode = CIFS_I(inode);
3768 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3769 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3770 iocb->ki_filp->private_data;
3771 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3772 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003773
3774 /*
3775 * In strict cache mode we need to read from the server all the time
3776 * if we don't have level II oplock because the server can delay mtime
3777 * change - so we can't make a decision about inode invalidating.
3778 * And we can also fail with pagereading if there are mandatory locks
3779 * on pages affected by this read but not on the region from pos to
3780 * pos+len-1.
3781 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003782 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003783 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003784
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003785 if (cap_unix(tcon->ses) &&
3786 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3787 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003788 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003789
3790 /*
3791 * We need to hold the sem to be sure nobody modifies lock list
3792 * with a brlock that prevents reading.
3793 */
3794 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003795 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003796 tcon->ses->server->vals->shared_lock_type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003797 0, NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003798 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003799 up_read(&cinode->lock_sem);
3800 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003801}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003803static ssize_t
3804cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805{
3806 int rc = -EACCES;
3807 unsigned int bytes_read = 0;
3808 unsigned int total_read;
3809 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003810 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003812 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003813 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003814 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003815 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003817 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003818 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003819 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003821 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003822 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003824 /* FIXME: set up handlers for larger reads and/or convert to async */
3825 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3826
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303828 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003829 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303830 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003832 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003833 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003834 server = tcon->ses->server;
3835
3836 if (!server->ops->sync_read) {
3837 free_xid(xid);
3838 return -ENOSYS;
3839 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003841 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3842 pid = open_file->pid;
3843 else
3844 pid = current->tgid;
3845
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003847 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003849 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3850 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003851 do {
3852 current_read_size = min_t(uint, read_size - total_read,
3853 rsize);
3854 /*
3855 * For windows me and 9x we do not want to request more
3856 * than it negotiated since it will refuse the read
3857 * then.
3858 */
3859 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003860 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003861 current_read_size = min_t(uint,
3862 current_read_size, CIFSMaxBufSize);
3863 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003864 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003865 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003866 if (rc != 0)
3867 break;
3868 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003869 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003870 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003871 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003872 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003873 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003874 &bytes_read, &cur_offset,
3875 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003876 } while (rc == -EAGAIN);
3877
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878 if (rc || (bytes_read == 0)) {
3879 if (total_read) {
3880 break;
3881 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003882 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 return rc;
3884 }
3885 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003886 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003887 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888 }
3889 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003890 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891 return total_read;
3892}
3893
Jeff Laytonca83ce32011-04-12 09:13:44 -04003894/*
3895 * If the page is mmap'ed into a process' page tables, then we need to make
3896 * sure that it doesn't change while being written back.
3897 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05303898static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08003899cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003900{
3901 struct page *page = vmf->page;
3902
3903 lock_page(page);
3904 return VM_FAULT_LOCKED;
3905}
3906
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003907static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003908 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003909 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003910 .page_mkwrite = cifs_page_mkwrite,
3911};
3912
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003913int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3914{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003915 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003916 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003917
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003918 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003919
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003920 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003921 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003922 if (!rc)
3923 rc = generic_file_mmap(file, vma);
3924 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003925 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003926
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003927 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003928 return rc;
3929}
3930
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3932{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 int rc, xid;
3934
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003935 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003936
Jeff Laytonabab0952010-02-12 07:44:18 -05003937 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003938 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003939 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3940 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003941 if (!rc)
3942 rc = generic_file_mmap(file, vma);
3943 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003944 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003945
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003946 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 return rc;
3948}
3949
Jeff Layton0471ca32012-05-16 07:13:16 -04003950static void
3951cifs_readv_complete(struct work_struct *work)
3952{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003953 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003954 struct cifs_readdata *rdata = container_of(work,
3955 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003956
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003957 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003958 for (i = 0; i < rdata->nr_pages; i++) {
3959 struct page *page = rdata->pages[i];
3960
Jeff Layton0471ca32012-05-16 07:13:16 -04003961 lru_cache_add_file(page);
3962
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003963 if (rdata->result == 0 ||
3964 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003965 flush_dcache_page(page);
3966 SetPageUptodate(page);
3967 }
3968
3969 unlock_page(page);
3970
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003971 if (rdata->result == 0 ||
3972 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003973 cifs_readpage_to_fscache(rdata->mapping->host, page);
3974
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003975 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003976
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003977 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003978 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003979 }
Jeff Layton6993f742012-05-16 07:13:17 -04003980 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003981}
3982
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003983static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003984readpages_fill_pages(struct TCP_Server_Info *server,
3985 struct cifs_readdata *rdata, struct iov_iter *iter,
3986 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003987{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003988 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003989 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003990 u64 eof;
3991 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003992 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003993 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003994
3995 /* determine the eof that the server (probably) has */
3996 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003997 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05003998 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003999
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004000 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004001 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004002 for (i = 0; i < nr_pages; i++) {
4003 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07004004 unsigned int to_read = rdata->pagesz;
4005 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004006
Long Li1dbe3462018-05-30 12:47:55 -07004007 if (i == 0)
4008 to_read -= page_offset;
4009 else
4010 page_offset = 0;
4011
4012 n = to_read;
4013
4014 if (len >= to_read) {
4015 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07004016 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004017 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07004018 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05004019 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07004020 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004021 } else if (page->index > eof_index) {
4022 /*
4023 * The VFS will not try to do readahead past the
4024 * i_size, but it's possible that we have outstanding
4025 * writes with gaps in the middle and the i_size hasn't
4026 * caught up yet. Populate those with zeroed out pages
4027 * to prevent the VFS from repeatedly attempting to
4028 * fill them until the writes are flushed.
4029 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004030 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004031 lru_cache_add_file(page);
4032 flush_dcache_page(page);
4033 SetPageUptodate(page);
4034 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004035 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004036 rdata->pages[i] = NULL;
4037 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004038 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004039 } else {
4040 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004041 lru_cache_add_file(page);
4042 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004043 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004044 rdata->pages[i] = NULL;
4045 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004046 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004047 }
Jeff Layton8321fec2012-09-19 06:22:32 -07004048
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004049 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07004050 result = copy_page_from_iter(
4051 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07004052#ifdef CONFIG_CIFS_SMB_DIRECT
4053 else if (rdata->mr)
4054 result = n;
4055#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004056 else
Long Li1dbe3462018-05-30 12:47:55 -07004057 result = cifs_read_page_from_socket(
4058 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07004059 if (result < 0)
4060 break;
4061
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004062 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004063 }
4064
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004065 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4066 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004067}
4068
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004069static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004070cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4071 struct cifs_readdata *rdata, unsigned int len)
4072{
4073 return readpages_fill_pages(server, rdata, NULL, len);
4074}
4075
4076static int
4077cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4078 struct cifs_readdata *rdata,
4079 struct iov_iter *iter)
4080{
4081 return readpages_fill_pages(server, rdata, iter, iter->count);
4082}
4083
4084static int
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004085readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
4086 unsigned int rsize, struct list_head *tmplist,
4087 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
4088{
4089 struct page *page, *tpage;
4090 unsigned int expected_index;
4091 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07004092 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004093
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004094 INIT_LIST_HEAD(tmplist);
4095
Nikolay Borisovf86196e2019-01-03 15:29:02 -08004096 page = lru_to_page(page_list);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004097
4098 /*
4099 * Lock the page and put it in the cache. Since no one else
4100 * should have access to this page, we're safe to simply set
4101 * PG_locked without checking it first.
4102 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004103 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004104 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07004105 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004106
4107 /* give up if we can't stick it in the cache */
4108 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004109 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004110 return rc;
4111 }
4112
4113 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004114 *offset = (loff_t)page->index << PAGE_SHIFT;
4115 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004116 *nr_pages = 1;
4117 list_move_tail(&page->lru, tmplist);
4118
4119 /* now try and add more pages onto the request */
4120 expected_index = page->index + 1;
4121 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4122 /* discontinuity ? */
4123 if (page->index != expected_index)
4124 break;
4125
4126 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004127 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004128 break;
4129
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004130 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07004131 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004132 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004133 break;
4134 }
4135 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004136 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004137 expected_index++;
4138 (*nr_pages)++;
4139 }
4140 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004141}
4142
Linus Torvalds1da177e2005-04-16 15:20:36 -07004143static int cifs_readpages(struct file *file, struct address_space *mapping,
4144 struct list_head *page_list, unsigned num_pages)
4145{
Jeff Layton690c5e32011-10-19 15:30:16 -04004146 int rc;
4147 struct list_head tmplist;
4148 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04004149 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004150 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004151 pid_t pid;
Steve French0cb012d2018-10-11 01:01:02 -05004152 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153
Steve French0cb012d2018-10-11 01:01:02 -05004154 xid = get_xid();
Jeff Layton690c5e32011-10-19 15:30:16 -04004155 /*
Suresh Jayaraman566982362010-07-05 18:13:25 +05304156 * Reads as many pages as possible from fscache. Returns -ENOBUFS
4157 * immediately if the cookie is negative
David Howells54afa992013-09-04 17:10:39 +00004158 *
4159 * After this point, every page in the list might have PG_fscache set,
4160 * so we will need to clean that up off of every page we don't use.
Suresh Jayaraman566982362010-07-05 18:13:25 +05304161 */
4162 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4163 &num_pages);
Steve French0cb012d2018-10-11 01:01:02 -05004164 if (rc == 0) {
4165 free_xid(xid);
Jeff Layton690c5e32011-10-19 15:30:16 -04004166 return rc;
Steve French0cb012d2018-10-11 01:01:02 -05004167 }
Suresh Jayaraman566982362010-07-05 18:13:25 +05304168
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004169 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4170 pid = open_file->pid;
4171 else
4172 pid = current->tgid;
4173
Jeff Layton690c5e32011-10-19 15:30:16 -04004174 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004175 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176
Joe Perchesf96637b2013-05-04 22:12:25 -05004177 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4178 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04004179
4180 /*
4181 * Start with the page at end of list and move it to private
4182 * list. Do the same with any following pages until we hit
4183 * the rsize limit, hit an index discontinuity, or run out of
4184 * pages. Issue the async read and then start the loop again
4185 * until the list is empty.
4186 *
4187 * Note that list order is important. The page_list is in
4188 * the order of declining indexes. When we put the pages in
4189 * the rdata->pages, then we want them in increasing order.
4190 */
4191 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004192 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04004193 loff_t offset;
4194 struct page *page, *tpage;
4195 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004196 struct cifs_credits credits_on_stack;
4197 struct cifs_credits *credits = &credits_on_stack;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004199 if (open_file->invalidHandle) {
4200 rc = cifs_reopen_file(open_file, true);
4201 if (rc == -EAGAIN)
4202 continue;
4203 else if (rc)
4204 break;
4205 }
4206
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004207 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004208 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004209 if (rc)
4210 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211
Jeff Layton690c5e32011-10-19 15:30:16 -04004212 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004213 * Give up immediately if rsize is too small to read an entire
4214 * page. The VFS will fall back to readpage. We should never
4215 * reach this point however since we set ra_pages to 0 when the
4216 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04004217 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004218 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004219 add_credits_and_wake_if(server, credits, 0);
Steve French0cb012d2018-10-11 01:01:02 -05004220 free_xid(xid);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004221 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004224 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
4225 &nr_pages, &offset, &bytes);
4226 if (rc) {
4227 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04004229 }
4230
Jeff Layton0471ca32012-05-16 07:13:16 -04004231 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04004232 if (!rdata) {
4233 /* best to give up if we're out of mem */
4234 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4235 list_del(&page->lru);
4236 lru_cache_add_file(page);
4237 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004238 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04004239 }
4240 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004241 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04004242 break;
4243 }
4244
Jeff Layton6993f742012-05-16 07:13:17 -04004245 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04004246 rdata->mapping = mapping;
4247 rdata->offset = offset;
4248 rdata->bytes = bytes;
4249 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004250 rdata->pagesz = PAGE_SIZE;
Long Li1dbe3462018-05-30 12:47:55 -07004251 rdata->tailsz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07004252 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004253 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004254 rdata->credits = credits_on_stack;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004255
4256 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4257 list_del(&page->lru);
4258 rdata->pages[rdata->nr_pages++] = page;
4259 }
Jeff Layton690c5e32011-10-19 15:30:16 -04004260
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004261 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4262
4263 if (!rc) {
4264 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004265 rc = -EAGAIN;
4266 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004267 rc = server->ops->async_readv(rdata);
4268 }
4269
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004270 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004271 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004272 for (i = 0; i < rdata->nr_pages; i++) {
4273 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04004274 lru_cache_add_file(page);
4275 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004276 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04004278 /* Fallback to the readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04004279 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280 break;
4281 }
Jeff Layton6993f742012-05-16 07:13:17 -04004282
4283 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284 }
4285
David Howells54afa992013-09-04 17:10:39 +00004286 /* Any pages that have been shown to fscache but didn't get added to
4287 * the pagecache must be uncached before they get returned to the
4288 * allocator.
4289 */
4290 cifs_fscache_readpages_cancel(mapping->host, page_list);
Steve French0cb012d2018-10-11 01:01:02 -05004291 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004292 return rc;
4293}
4294
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01004295/*
4296 * cifs_readpage_worker must be called with the page pinned
4297 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298static int cifs_readpage_worker(struct file *file, struct page *page,
4299 loff_t *poffset)
4300{
4301 char *read_data;
4302 int rc;
4303
Suresh Jayaraman566982362010-07-05 18:13:25 +05304304 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05004305 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304306 if (rc == 0)
4307 goto read_complete;
4308
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309 read_data = kmap(page);
4310 /* for reads over a certain size could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004311
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004312 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004313
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 if (rc < 0)
4315 goto io_error;
4316 else
Joe Perchesf96637b2013-05-04 22:12:25 -05004317 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004318
Steve French9b9c5be2018-09-22 12:07:06 -05004319 /* we do not want atime to be less than mtime, it broke some apps */
4320 file_inode(file)->i_atime = current_time(file_inode(file));
4321 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4322 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4323 else
4324 file_inode(file)->i_atime = current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004325
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004326 if (PAGE_SIZE > rc)
4327 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328
4329 flush_dcache_page(page);
4330 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304331
4332 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05004333 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304334
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004336
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004338 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004339 unlock_page(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304340
4341read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342 return rc;
4343}
4344
4345static int cifs_readpage(struct file *file, struct page *page)
4346{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004347 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004349 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004351 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352
4353 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304354 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004355 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304356 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357 }
4358
Joe Perchesf96637b2013-05-04 22:12:25 -05004359 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00004360 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361
4362 rc = cifs_readpage_worker(file, page, &offset);
4363
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004364 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004365 return rc;
4366}
4367
Steve Frencha403a0a2007-07-26 15:54:16 +00004368static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4369{
4370 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05004371 struct cifs_tcon *tcon =
4372 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00004373
Steve French3afca262016-09-22 18:58:16 -05004374 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004375 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004376 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05004377 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004378 return 1;
4379 }
4380 }
Steve French3afca262016-09-22 18:58:16 -05004381 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004382 return 0;
4383}
4384
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385/* We do not want to update the file size from server for inodes
4386 open for write - to avoid races with writepage extending
4387 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004388 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004389 but this is tricky to do without racing with writebehind
4390 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004391bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004392{
Steve Frencha403a0a2007-07-26 15:54:16 +00004393 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004394 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004395
Steve Frencha403a0a2007-07-26 15:54:16 +00004396 if (is_inode_writable(cifsInode)) {
4397 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004398 struct cifs_sb_info *cifs_sb;
4399
Steve Frenchc32a0b62006-01-12 14:41:28 -08004400 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004401 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004402 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004403 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004404 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004405 }
4406
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004407 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004408 return true;
Steve French7ba526312007-02-08 18:14:13 +00004409
Steve French4b18f2a2008-04-29 00:06:05 +00004410 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004411 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004412 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413}
4414
Nick Piggind9414772008-09-24 11:32:59 -04004415static int cifs_write_begin(struct file *file, struct address_space *mapping,
4416 loff_t pos, unsigned len, unsigned flags,
4417 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418{
Sachin Prabhu466bd312013-09-13 14:11:57 +01004419 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004420 pgoff_t index = pos >> PAGE_SHIFT;
4421 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004422 loff_t page_start = pos & PAGE_MASK;
4423 loff_t i_size;
4424 struct page *page;
4425 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426
Joe Perchesf96637b2013-05-04 22:12:25 -05004427 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04004428
Sachin Prabhu466bd312013-09-13 14:11:57 +01004429start:
Nick Piggin54566b22009-01-04 12:00:53 -08004430 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004431 if (!page) {
4432 rc = -ENOMEM;
4433 goto out;
4434 }
Nick Piggind9414772008-09-24 11:32:59 -04004435
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004436 if (PageUptodate(page))
4437 goto out;
Steve French8a236262007-03-06 00:31:00 +00004438
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004439 /*
4440 * If we write a full page it will be up to date, no need to read from
4441 * the server. If the write is short, we'll end up doing a sync write
4442 * instead.
4443 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004444 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004445 goto out;
4446
4447 /*
4448 * optimize away the read when we have an oplock, and we're not
4449 * expecting to use any of the data we'd be reading in. That
4450 * is, when the page lies beyond the EOF, or straddles the EOF
4451 * and the write will cover all of the existing data.
4452 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004453 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004454 i_size = i_size_read(mapping->host);
4455 if (page_start >= i_size ||
4456 (offset == 0 && (pos + len) >= i_size)) {
4457 zero_user_segments(page, 0, offset,
4458 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004459 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004460 /*
4461 * PageChecked means that the parts of the page
4462 * to which we're not writing are considered up
4463 * to date. Once the data is copied to the
4464 * page, it can be set uptodate.
4465 */
4466 SetPageChecked(page);
4467 goto out;
4468 }
4469 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470
Sachin Prabhu466bd312013-09-13 14:11:57 +01004471 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004472 /*
4473 * might as well read a page, it is fast enough. If we get
4474 * an error, we don't need to return it. cifs_write_end will
4475 * do a sync write instead since PG_uptodate isn't set.
4476 */
4477 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004478 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004479 oncethru = 1;
4480 goto start;
Steve French8a236262007-03-06 00:31:00 +00004481 } else {
4482 /* we could try using another file handle if there is one -
4483 but how would we lock it to prevent close of that handle
4484 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04004485 this will be written out by write_end so is fine */
Steve French8a236262007-03-06 00:31:00 +00004486 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004487out:
4488 *pagep = page;
4489 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490}
4491
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304492static int cifs_release_page(struct page *page, gfp_t gfp)
4493{
4494 if (PagePrivate(page))
4495 return 0;
4496
4497 return cifs_fscache_release_page(page, gfp);
4498}
4499
Lukas Czernerd47992f2013-05-21 23:17:23 -04004500static void cifs_invalidate_page(struct page *page, unsigned int offset,
4501 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304502{
4503 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4504
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004505 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304506 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4507}
4508
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004509static int cifs_launder_page(struct page *page)
4510{
4511 int rc = 0;
4512 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004513 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004514 struct writeback_control wbc = {
4515 .sync_mode = WB_SYNC_ALL,
4516 .nr_to_write = 0,
4517 .range_start = range_start,
4518 .range_end = range_end,
4519 };
4520
Joe Perchesf96637b2013-05-04 22:12:25 -05004521 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004522
4523 if (clear_page_dirty_for_io(page))
4524 rc = cifs_writepage_locked(page, &wbc);
4525
4526 cifs_fscache_invalidate_page(page, page->mapping->host);
4527 return rc;
4528}
4529
Tejun Heo9b646972010-07-20 22:09:02 +02004530void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04004531{
4532 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4533 oplock_break);
David Howells2b0143b2015-03-17 22:25:59 +00004534 struct inode *inode = d_inode(cfile->dentry);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004535 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004536 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004537 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004538 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04004539
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004540 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
NeilBrown74316202014-07-07 15:16:04 +10004541 TASK_UNINTERRUPTIBLE);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004542
4543 server->ops->downgrade_oplock(server, cinode,
4544 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
4545
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004546 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004547 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05004548 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4549 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004550 cinode->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004551 }
4552
Jeff Layton3bc303c2009-09-21 06:47:50 -04004553 if (inode && S_ISREG(inode->i_mode)) {
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004554 if (CIFS_CACHE_READ(cinode))
Al Viro8737c932009-12-24 06:47:55 -05004555 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00004556 else
Al Viro8737c932009-12-24 06:47:55 -05004557 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004558 rc = filemap_fdatawrite(inode->i_mapping);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004559 if (!CIFS_CACHE_READ(cinode)) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004560 rc = filemap_fdatawait(inode->i_mapping);
4561 mapping_set_error(inode->i_mapping, rc);
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004562 cifs_zap_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004563 }
Joe Perchesf96637b2013-05-04 22:12:25 -05004564 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004565 }
4566
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004567 rc = cifs_push_locks(cfile);
4568 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004569 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004570
Jeff Layton3bc303c2009-09-21 06:47:50 -04004571 /*
4572 * releasing stale oplock after recent reconnect of smb session using
4573 * a now incorrect file handle is not a data integrity issue but do
4574 * not bother sending an oplock release if session to server still is
4575 * disconnected since oplock already released by the server
4576 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00004577 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004578 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4579 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05004580 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004581 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004582 cifs_done_oplock_break(cinode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004583}
4584
Steve Frenchdca69282013-11-11 16:42:37 -06004585/*
4586 * The presence of cifs_direct_io() in the address space ops vector
4587 * allowes open() O_DIRECT flags which would have failed otherwise.
4588 *
4589 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4590 * so this method should never be called.
4591 *
4592 * Direct IO is not yet supported in the cached mode.
4593 */
4594static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004595cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004596{
4597 /*
4598 * FIXME
4599 * Eventually need to support direct IO for non forcedirectio mounts
4600 */
4601 return -EINVAL;
4602}
4603
4604
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004605const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606 .readpage = cifs_readpage,
4607 .readpages = cifs_readpages,
4608 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07004609 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004610 .write_begin = cifs_write_begin,
4611 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304613 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06004614 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304615 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004616 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004618
4619/*
4620 * cifs_readpages requires the server to support a buffer large enough to
4621 * contain the header plus one complete page of data. Otherwise, we need
4622 * to leave cifs_readpages out of the address space operations.
4623 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004624const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004625 .readpage = cifs_readpage,
4626 .writepage = cifs_writepage,
4627 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004628 .write_begin = cifs_write_begin,
4629 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004630 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304631 .releasepage = cifs_release_page,
4632 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004633 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004634};