/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request; it can
		 * cause unnecessary access-denied errors on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
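
/*
 * For example, an O_RDWR open is mapped above to GENERIC_READ |
 * GENERIC_WRITE rather than GENERIC_ALL, which is enough for normal
 * read/write I/O while avoiding the spurious access-denied errors on
 * create that requesting GENERIC_ALL can trigger.
 */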

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
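
/*
 * For example, an open(2) with O_CREAT | O_TRUNC maps to FILE_OVERWRITE_IF,
 * i.e. "create the file if it is missing, truncate it if it exists", the
 * closest SMB create disposition to those POSIX flags; see the mapping
 * table in cifs_nt_open() below.
 */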

int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar, but it truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}
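
/*
 * cifs_has_mand_locks() is used by the open and reopen paths below to
 * decide whether a read oplock may be kept: cached reads under an oplock
 * would bypass mandatory byte-range locking, so the oplock level is
 * dropped to None whenever any fid on the inode holds mandatory brlocks.
 */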

struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
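
/*
 * Take an extra reference on the file private data. Every reference taken
 * here must be balanced by a later cifsFileInfo_put(); the handle is only
 * closed out on the server once the count drops to zero.
 */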
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock and
 * cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fall through to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
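
/*
 * cifs_relock_file() runs from cifs_reopen_file() below once a handle has
 * been re-established (oparms.reconnect): the byte-range locks recorded
 * for the file are pushed back to the server, as POSIX locks where the
 * unix extensions allow it and as mandatory locks otherwise.
 */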

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called, and if the server was down that means
	 * we end up here, and we can never tell if the caller already has
	 * the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * Fall through to retry open the old way on errors;
		 * especially in the reconnect path it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
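
/*
 * Note the two-phase structure above: handles needing reopen are first
 * collected onto a private tmp_list under tcon->open_file_lock (with a
 * reference taken on each), and only then reopened with the spinlock
 * dropped, since cifs_reopen_file() issues network I/O and may block.
 */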

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->fl_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
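
/*
 * The blocking path above works as follows: the waiter queues itself on
 * the conflicting lock's blist and sleeps on its own block_q until
 * cifs_del_lock_waiters() unlinks it (leaving lock->blist pointing to
 * itself), then jumps back to try_again to rescan for conflicts, since
 * another waiter may have installed a new conflicting lock in the
 * meantime.
 */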
1072
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001073/*
1074 * Check if there is another lock that prevents us to set the lock (posix
1075 * style). If such a lock exists, update the flock structure with its
1076 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1077 * or leave it the same if we can't. Returns 0 if we don't need to request to
1078 * the server or 1 otherwise.
1079 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001080static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001081cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1082{
1083 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001084 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001085 unsigned char saved_type = flock->fl_type;
1086
Pavel Shilovsky50792762011-10-29 17:17:57 +04001087 if ((flock->fl_flags & FL_POSIX) == 0)
1088 return 1;
1089
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001090 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001091 posix_test_lock(file, flock);
1092
1093 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1094 flock->fl_type = saved_type;
1095 rc = 1;
1096 }
1097
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001098 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001099 return rc;
1100}
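/*
 * Editor's note (assuming standard VFS lock semantics): posix_test_lock()
 * reporting F_UNLCK only means no *local* conflict was found. When brlocks
 * cannot be cached the local view may be stale, so the saved type is
 * restored and rc = 1 tells the caller to ask the server instead.
 */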
1101
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001102/*
1103 * Set the byte-range lock (posix style). Returns:
1104 * 1) 0, if we set the lock and don't need to send a request to the server;
1105 * 2) 1, if we need to send a request to the server;
1106 * 3) <0, if an error occurs while setting the lock.
1107 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001108static int
1109cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1110{
Al Viro496ad9a2013-01-23 17:07:38 -05001111 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001112 int rc = 1;
1113
1114 if ((flock->fl_flags & FL_POSIX) == 0)
1115 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001116
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001117try_again:
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001118 down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001119 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001120 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001121 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001122 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001123
1124 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001125 up_write(&cinode->lock_sem);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001126 if (rc == FILE_LOCK_DEFERRED) {
NeilBrownada5c1d2018-11-30 10:04:08 +11001127 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001128 if (!rc)
1129 goto try_again;
NeilBrowncb03f942018-11-30 10:04:08 +11001130 locks_delete_block(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001131 }
Steve French9ebb3892012-04-01 13:52:54 -05001132 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001133}
1134
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001135int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001136cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001137{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001138 unsigned int xid;
1139 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001140 struct cifsLockInfo *li, *tmp;
1141 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001142 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001143 LOCKING_ANDX_RANGE *buf, *cur;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001144 static const int types[] = {
1145 LOCKING_ANDX_LARGE_FILES,
1146 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1147 };
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001148 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001149
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001150 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001151 tcon = tlink_tcon(cfile->tlink);
1152
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001153 /*
1154 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001155 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001156 */
1157 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001158 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001159 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001160 return -EINVAL;
1161 }
1162
Ross Lagerwall92a81092019-01-08 18:30:56 +00001163 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1164 PAGE_SIZE);
1165 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1166 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001167 max_num = (max_buf - sizeof(struct smb_hdr)) /
1168 sizeof(LOCKING_ANDX_RANGE);
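	/*
	 * Editor's note, illustrative arithmetic only (real sizes are
	 * build-dependent): if the server advertised max_buf = 4356 and
	 * sizeof(struct smb_hdr) were 32, the clamp above would give
	 * max_buf = min(4356 - 32, PAGE_SIZE) = 4096 on 4K-page systems,
	 * so max_num = (4096 - 32) / sizeof(LOCKING_ANDX_RANGE) ranges
	 * per LOCKING_ANDX request.
	 */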
Fabian Frederick4b99d392014-12-10 15:41:17 -08001169 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001170 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001171 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001172 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001173 }
1174
1175 for (i = 0; i < 2; i++) {
1176 cur = buf;
1177 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001178 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001179 if (li->type != types[i])
1180 continue;
1181 cur->Pid = cpu_to_le16(li->pid);
1182 cur->LengthLow = cpu_to_le32((u32)li->length);
1183 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1184 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1185 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1186 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001187 stored_rc = cifs_lockv(xid, tcon,
1188 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001189 (__u8)li->type, 0, num,
1190 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001191 if (stored_rc)
1192 rc = stored_rc;
1193 cur = buf;
1194 num = 0;
1195 } else
1196 cur++;
1197 }
1198
1199 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001200 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001201 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001202 if (stored_rc)
1203 rc = stored_rc;
1204 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001205 }
1206
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001207 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001208 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001209 return rc;
1210}
1211
Jeff Layton3d224622016-05-24 06:27:44 -04001212static __u32
1213hash_lockowner(fl_owner_t owner)
1214{
1215 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1216}
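/*
 * Editor's note: fl_owner_t is a kernel pointer. hash32_ptr() folds it to
 * 32 bits, and the XOR with cifs_lock_secret (presumably a random value
 * generated at module init, defined elsewhere in cifs) keeps the raw
 * pointer value from being sent to the server as a lock-owner id.
 */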
1217
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001218struct lock_to_push {
1219 struct list_head llist;
1220 __u64 offset;
1221 __u64 length;
1222 __u32 pid;
1223 __u16 netfid;
1224 __u8 type;
1225};
1226
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001227static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001228cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001229{
David Howells2b0143b2015-03-17 22:25:59 +00001230 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001231 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001232 struct file_lock *flock;
1233 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001234 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001235 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001236 struct list_head locks_to_send, *el;
1237 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001238 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001239
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001240 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001241
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001242 if (!flctx)
1243 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001244
Jeff Laytone084c1b2015-02-16 14:32:03 -05001245 spin_lock(&flctx->flc_lock);
1246 list_for_each(el, &flctx->flc_posix) {
1247 count++;
1248 }
1249 spin_unlock(&flctx->flc_lock);
1250
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001251 INIT_LIST_HEAD(&locks_to_send);
1252
1253 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001254 * Allocating count locks is enough because no FL_POSIX locks can be
1255 * added to the list while we are holding cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001256 * protects locking operations on this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001257 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001258 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001259 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1260 if (!lck) {
1261 rc = -ENOMEM;
1262 goto err_out;
1263 }
1264 list_add_tail(&lck->llist, &locks_to_send);
1265 }
1266
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001267 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001268 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001269 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001270 if (el == &locks_to_send) {
1271 /*
1272 * The list ended. We don't have enough allocated
1273 * structures - something is really wrong.
1274 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001275 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001276 break;
1277 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001278 length = 1 + flock->fl_end - flock->fl_start;
1279 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1280 type = CIFS_RDLCK;
1281 else
1282 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001283 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001284 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001285 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001286 lck->length = length;
1287 lck->type = type;
1288 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001289 }
Jeff Layton6109c852015-01-16 15:05:57 -05001290 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001291
1292 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001293 int stored_rc;
1294
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001295 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001296 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001297 lck->type, 0);
1298 if (stored_rc)
1299 rc = stored_rc;
1300 list_del(&lck->llist);
1301 kfree(lck);
1302 }
1303
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001304out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001305 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001306 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001307err_out:
1308 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1309 list_del(&lck->llist);
1310 kfree(lck);
1311 }
1312 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001313}
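/*
 * Editor's note: the shape of cifs_push_posix_locks() - count under
 * flc_lock, drop it, kmalloc() the nodes, retake the lock and fill them -
 * is the usual way to avoid sleeping allocations under a spinlock. The
 * count cannot grow between the two passes because the caller holds
 * cinode->lock_sem, as the comment above notes.
 */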
1314
1315static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001316cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001317{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001318 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001319 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001320 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001321 int rc = 0;
1322
1323 /* we are going to update can_cache_brlcks here - need a write access */
1324 down_write(&cinode->lock_sem);
1325 if (!cinode->can_cache_brlcks) {
1326 up_write(&cinode->lock_sem);
1327 return rc;
1328 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001329
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001330 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001331 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1332 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001333 rc = cifs_push_posix_locks(cfile);
1334 else
1335 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001336
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001337 cinode->can_cache_brlcks = false;
1338 up_write(&cinode->lock_sem);
1339 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001340}
1341
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001342static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001343cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001344 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001346 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001347 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001348 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001349 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001350 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001351 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001352 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001354 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001355 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001356 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001357 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001358 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001359 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001360 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001361 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001363 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001364 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001365 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001366 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001367 *lock = 1;
1368 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001369 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001370 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001371 *unlock = 1;
1372 /* Check if unlock includes more than one lock range */
1373 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001374 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001375 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001376 *lock = 1;
1377 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001378 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001379 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001380 *lock = 1;
1381 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001382 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001383 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001384 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001386 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001387}
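/*
 * Editor's note, summary of the translation above (the *_lock_type bits
 * are OR'ed onto the dialect's large_lock_type from server->vals):
 *
 *	F_WRLCK, F_EXLCK  ->  exclusive_lock_type, *lock = 1
 *	F_RDLCK, F_SHLCK  ->  shared_lock_type,    *lock = 1
 *	F_UNLCK           ->  unlock_lock_type,    *unlock = 1
 */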
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001389static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001390cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001391 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001392{
1393 int rc = 0;
1394 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001395 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1396 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001397 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001398 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001400 if (posix_lck) {
1401 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001402
1403 rc = cifs_posix_lock_test(file, flock);
1404 if (!rc)
1405 return rc;
1406
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001407 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001408 posix_lock_type = CIFS_RDLCK;
1409 else
1410 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001411 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1412 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001413 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001414 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 return rc;
1416 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001417
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001418 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001419 if (!rc)
1420 return rc;
1421
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001422 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001423 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1424 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001425 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001426 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1427 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001428 flock->fl_type = F_UNLCK;
1429 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001430 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1431 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001432 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001433 }
1434
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001435 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001436 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001437 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001438 }
1439
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001440 type &= ~server->vals->exclusive_lock_type;
1441
1442 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1443 type | server->vals->shared_lock_type,
1444 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001445 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001446 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1447 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001448 flock->fl_type = F_RDLCK;
1449 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001450 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1451 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001452 } else
1453 flock->fl_type = F_WRLCK;
1454
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001455 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001456}
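/*
 * Editor's note: with no true "test" verb for mandatory locks,
 * cifs_getlk() probes by actually taking the lock on the server and
 * unlocking again on success, reporting F_UNLCK (no conflict). If an
 * exclusive probe fails, a second, shared probe tells whether the
 * conflicting lock is shared (report F_RDLCK) or exclusive (report
 * F_WRLCK).
 */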
1457
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001458void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001459cifs_move_llist(struct list_head *source, struct list_head *dest)
1460{
1461 struct list_head *li, *tmp;
1462 list_for_each_safe(li, tmp, source)
1463 list_move(li, dest);
1464}
1465
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001466void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001467cifs_free_llist(struct list_head *llist)
1468{
1469 struct cifsLockInfo *li, *tmp;
1470 list_for_each_entry_safe(li, tmp, llist, llist) {
1471 cifs_del_lock_waiters(li);
1472 list_del(&li->llist);
1473 kfree(li);
1474 }
1475}
1476
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001477int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001478cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1479 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001480{
1481 int rc = 0, stored_rc;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001482 static const int types[] = {
1483 LOCKING_ANDX_LARGE_FILES,
1484 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1485 };
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001486 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001487 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001488 LOCKING_ANDX_RANGE *buf, *cur;
1489 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001490 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001491 struct cifsLockInfo *li, *tmp;
1492 __u64 length = 1 + flock->fl_end - flock->fl_start;
1493 struct list_head tmp_llist;
1494
1495 INIT_LIST_HEAD(&tmp_llist);
1496
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001497 /*
1498 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001499 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001500 */
1501 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001502 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001503 return -EINVAL;
1504
Ross Lagerwall92a81092019-01-08 18:30:56 +00001505 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1506 PAGE_SIZE);
1507 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1508 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001509 max_num = (max_buf - sizeof(struct smb_hdr)) /
1510 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001511 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001512 if (!buf)
1513 return -ENOMEM;
1514
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001515 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001516 for (i = 0; i < 2; i++) {
1517 cur = buf;
1518 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001519 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001520 if (flock->fl_start > li->offset ||
1521 (flock->fl_start + length) <
1522 (li->offset + li->length))
1523 continue;
1524 if (current->tgid != li->pid)
1525 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001526 if (types[i] != li->type)
1527 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001528 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001529 /*
1530 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001531 * the lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001532 */
1533 list_del(&li->llist);
1534 cifs_del_lock_waiters(li);
1535 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001536 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001537 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001538 cur->Pid = cpu_to_le16(li->pid);
1539 cur->LengthLow = cpu_to_le32((u32)li->length);
1540 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1541 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1542 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1543 /*
1544 * We need to save the lock here so we can add it back to
1545 * the file's list if the unlock range request fails on
1546 * the server.
1547 */
1548 list_move(&li->llist, &tmp_llist);
1549 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001550 stored_rc = cifs_lockv(xid, tcon,
1551 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001552 li->type, num, 0, buf);
1553 if (stored_rc) {
1554 /*
1555 * We failed on the unlock range
1556 * request - add all locks from the tmp
1557 * list to the head of the file's list.
1558 */
1559 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001560 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001561 rc = stored_rc;
1562 } else
1563 /*
1564 * The unlock range request succeeded -
1565 * free the tmp list.
1566 */
1567 cifs_free_llist(&tmp_llist);
1568 cur = buf;
1569 num = 0;
1570 } else
1571 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001572 }
1573 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001574 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001575 types[i], num, 0, buf);
1576 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001577 cifs_move_llist(&tmp_llist,
1578 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001579 rc = stored_rc;
1580 } else
1581 cifs_free_llist(&tmp_llist);
1582 }
1583 }
1584
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001585 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001586 kfree(buf);
1587 return rc;
1588}
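/*
 * Editor's note: tmp_llist above acts as a small transaction log: ranges
 * being unlocked are parked on it and are then either freed (the server
 * confirmed the unlock) or spliced back onto the file's list (the server
 * refused), so the client never forgets a lock it still holds.
 */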
1589
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001590static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001591cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001592 bool wait_flag, bool posix_lck, int lock, int unlock,
1593 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001594{
1595 int rc = 0;
1596 __u64 length = 1 + flock->fl_end - flock->fl_start;
1597 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1598 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001599 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001600 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001601
1602 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001603 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001604
1605 rc = cifs_posix_lock_set(file, flock);
1606 if (!rc || rc < 0)
1607 return rc;
1608
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001609 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001610 posix_lock_type = CIFS_RDLCK;
1611 else
1612 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001613
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001614 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001615 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001616
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001617 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001618 hash_lockowner(flock->fl_owner),
1619 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001620 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001621 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001622 }
1623
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001624 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001625 struct cifsLockInfo *lock;
1626
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001627 lock = cifs_lock_init(flock->fl_start, length, type,
1628 flock->fl_flags);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001629 if (!lock)
1630 return -ENOMEM;
1631
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001632 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001633 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001634 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001635 return rc;
1636 }
1637 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001638 goto out;
1639
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001640 /*
1641 * A Windows 7 server can delay breaking a lease from read to None
1642 * if we set a byte-range lock on a file - break it explicitly
1643 * before sending the lock to the server to be sure the next
1644 * read won't conflict with non-overlapping locks due to
1645 * page reading.
1646 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001647 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1648 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001649 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001650 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1651 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001652 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001653 }
1654
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001655 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1656 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001657 if (rc) {
1658 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001659 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001660 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001661
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001662 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001663 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001664 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001665
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001666out:
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001667 if (flock->fl_flags & FL_POSIX) {
1668 /*
1669 * If this is a request to remove all locks because we
1670 * are closing the file, it doesn't matter if the
1671 * unlocking failed as both cifs.ko and the SMB server
1672 * remove the lock on file close.
1673 */
1674 if (rc) {
1675 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1676 if (!(flock->fl_flags & FL_CLOSE))
1677 return rc;
1678 }
Benjamin Coddington4f656362015-10-22 13:38:14 -04001679 rc = locks_lock_file_wait(file, flock);
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001680 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001681 return rc;
1682}
1683
1684int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1685{
1686 int rc, xid;
1687 int lock = 0, unlock = 0;
1688 bool wait_flag = false;
1689 bool posix_lck = false;
1690 struct cifs_sb_info *cifs_sb;
1691 struct cifs_tcon *tcon;
1692 struct cifsInodeInfo *cinode;
1693 struct cifsFileInfo *cfile;
1694 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001695 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001696
1697 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001698 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001699
Joe Perchesf96637b2013-05-04 22:12:25 -05001700 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1701 cmd, flock->fl_flags, flock->fl_type,
1702 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001703
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001704 cfile = (struct cifsFileInfo *)file->private_data;
1705 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001706
1707 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1708 tcon->ses->server);
Al Viro7119e222014-10-22 00:25:12 -04001709 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001710 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001711 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001712
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001713 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001714 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1715 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1716 posix_lck = true;
1717 /*
1718 * BB add code here to normalize offset and length to account for
1719 * negative length, which we cannot accept over the wire.
1720 */
1721 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001722 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001723 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001724 return rc;
1725 }
1726
1727 if (!lock && !unlock) {
1728 /*
1729 * If this is neither a lock nor an unlock request, there is
1730 * nothing to do since we do not know what it is.
1731 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001732 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001733 return -EOPNOTSUPP;
1734 }
1735
1736 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1737 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001738 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 return rc;
1740}
1741
Jeff Layton597b0272012-03-23 14:40:56 -04001742/*
1743 * Update the file size (if needed) after a write. Should be called with
1744 * the inode->i_lock held.
1745 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001746void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001747cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1748 unsigned int bytes_written)
1749{
1750 loff_t end_of_write = offset + bytes_written;
1751
1752 if (end_of_write > cifsi->server_eof)
1753 cifsi->server_eof = end_of_write;
1754}
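/*
 * Editor's note (worked example): server_eof tracks the file size as last
 * known on the server; a 512-byte write at offset 4096 advances it to 4608
 * if it was smaller, and leaves it alone otherwise.
 */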
1755
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001756static ssize_t
1757cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1758 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759{
1760 int rc = 0;
1761 unsigned int bytes_written = 0;
1762 unsigned int total_written;
1763 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001764 struct cifs_tcon *tcon;
1765 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001766 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001767 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001768 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001769 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
Jeff Layton7da4b492010-10-15 15:34:00 -04001771 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
Al Viro35c265e2014-08-19 20:25:34 -04001773 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1774 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001776 tcon = tlink_tcon(open_file->tlink);
1777 server = tcon->ses->server;
1778
1779 if (!server->ops->sync_write)
1780 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001781
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001782 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 for (total_written = 0; write_size > total_written;
1785 total_written += bytes_written) {
1786 rc = -EAGAIN;
1787 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001788 struct kvec iov[2];
1789 unsigned int len;
1790
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 /* we could deadlock if we called
1793 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001794 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001796 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 if (rc != 0)
1798 break;
1799 }
Steve French3e844692005-10-03 13:37:24 -07001800
David Howells2b0143b2015-03-17 22:25:59 +00001801 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001802 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001803 /* iov[0] is reserved for smb header */
1804 iov[1].iov_base = (char *)write_data + total_written;
1805 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001806 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001807 io_parms.tcon = tcon;
1808 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001809 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001810 rc = server->ops->sync_write(xid, &open_file->fid,
1811 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 }
1813 if (rc || (bytes_written == 0)) {
1814 if (total_written)
1815 break;
1816 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001817 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 return rc;
1819 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001820 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001821 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001822 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001823 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001824 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001825 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 }
1827
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001828 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829
Jeff Layton7da4b492010-10-15 15:34:00 -04001830 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001831 spin_lock(&d_inode(dentry)->i_lock);
1832 if (*offset > d_inode(dentry)->i_size)
1833 i_size_write(d_inode(dentry), *offset);
1834 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 }
David Howells2b0143b2015-03-17 22:25:59 +00001836 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001837 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 return total_written;
1839}
1840
Jeff Layton6508d902010-09-29 19:51:11 -04001841struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1842 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001843{
1844 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001845 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001846 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Layton6508d902010-09-29 19:51:11 -04001847
1848 /* only filter by fsuid on multiuser mounts */
1849 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1850 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001851
Steve French3afca262016-09-22 18:58:16 -05001852 spin_lock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001853 /* we could simply get the first_list_entry since write-only entries
1854 are always at the end of the list, but since the first entry might
1855 have a close pending, we go through the whole list */
1856 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001857 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001858 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001859 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001860 if (!open_file->invalidHandle) {
1861 /* found a good file */
1862 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001863 cifsFileInfo_get(open_file);
1864 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001865 return open_file;
1866 } /* else might as well continue, and look for
1867 another, or simply have the caller reopen it
1868 again rather than trying to fix this handle */
1869 } else /* write only file */
1870 break; /* write only files are last so must be done */
1871 }
Steve French3afca262016-09-22 18:58:16 -05001872 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001873 return NULL;
1874}
Steve French630f3f0c2007-10-25 21:17:17 +00001875
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001876/* Return -EBADF if no handle is found, or the general rc otherwise */
1877int
1878cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
1879 struct cifsFileInfo **ret_file)
Steve French6148a742005-10-05 12:23:19 -07001880{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001881 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001882 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001883 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001884 bool any_available = false;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001885 int rc = -EBADF;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001886 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001887
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001888 *ret_file = NULL;
1889
1890 /*
1891 * Having a null inode here (because mapping->host was set to zero by
1892 * the VFS or MM) should not happen, but we had reports of an oops (due
1893 * to it being zero) during stress testcases, so we need to check for it.
1894 */
Steve French60808232006-04-22 15:53:05 +00001895
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001896 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001897 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001898 dump_stack();
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001899 return rc;
Steve French60808232006-04-22 15:53:05 +00001900 }
1901
Jeff Laytond3892292010-11-02 16:22:50 -04001902 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001903 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001904
Jeff Layton6508d902010-09-29 19:51:11 -04001905 /* only filter by fsuid on multiuser mounts */
1906 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1907 fsuid_only = false;
1908
Steve French3afca262016-09-22 18:58:16 -05001909 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001910refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001911 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001912 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001913 return rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001914 }
Steve French6148a742005-10-05 12:23:19 -07001915 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001916 if (!any_available && open_file->pid != current->tgid)
1917 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001918 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001919 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001920 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001921 if (!open_file->invalidHandle) {
1922 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001923 cifsFileInfo_get(open_file);
1924 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001925 *ret_file = open_file;
1926 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001927 } else {
1928 if (!inv_file)
1929 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001930 }
Steve French6148a742005-10-05 12:23:19 -07001931 }
1932 }
Jeff Layton2846d382008-09-22 21:33:33 -04001933 /* couldn't find a usable FH with the same pid, try any available */
1934 if (!any_available) {
1935 any_available = true;
1936 goto refind_writable;
1937 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001938
1939 if (inv_file) {
1940 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001941 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001942 }
1943
Steve French3afca262016-09-22 18:58:16 -05001944 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001945
1946 if (inv_file) {
1947 rc = cifs_reopen_file(inv_file, false);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001948 if (!rc) {
1949 *ret_file = inv_file;
1950 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001951 }
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001952
1953 spin_lock(&tcon->open_file_lock);
1954 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
1955 spin_unlock(&tcon->open_file_lock);
1956 cifsFileInfo_put(inv_file);
1957 ++refind;
1958 inv_file = NULL;
1959 spin_lock(&tcon->open_file_lock);
1960 goto refind_writable;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001961 }
1962
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001963 return rc;
1964}
1965
1966struct cifsFileInfo *
1967find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
1968{
1969 struct cifsFileInfo *cfile;
1970 int rc;
1971
1972 rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
1973 if (rc)
1974 cifs_dbg(FYI, "couldn't find writable handle rc=%d\n", rc);
1975
1976 return cfile;
Steve French6148a742005-10-05 12:23:19 -07001977}
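/*
 * Editor's note: the refind_writable loop above prefers a valid handle
 * opened by the caller's tgid, then (filtering by fsuid only on multiuser
 * mounts) any other valid handle, and only then tries to reopen an
 * invalidated handle - at most MAX_REOPEN_ATT times, dropping
 * open_file_lock around the reopen since cifs_reopen_file() can block on
 * the network.
 */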
1978
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1980{
1981 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001982 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 char *write_data;
1984 int rc = -EFAULT;
1985 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001987 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
1989 if (!mapping || !mapping->host)
1990 return -EFAULT;
1991
1992 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993
1994 offset += (loff_t)from;
1995 write_data = kmap(page);
1996 write_data += from;
1997
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001998 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 kunmap(page);
2000 return -EIO;
2001 }
2002
2003 /* racing with truncate? */
2004 if (offset > mapping->host->i_size) {
2005 kunmap(page);
2006 return 0; /* don't care */
2007 }
2008
2009 /* check to make sure that we are not extending the file */
2010 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002011 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002013 rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
2014 if (!rc) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002015 bytes_written = cifs_write(open_file, open_file->pid,
2016 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04002017 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07002019 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002020 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07002021 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002022 else if (bytes_written < 0)
2023 rc = bytes_written;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002024 else
2025 rc = -EFAULT;
Steve French6148a742005-10-05 12:23:19 -07002026 } else {
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002027 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2028 if (!is_retryable_error(rc))
2029 rc = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 }
2031
2032 kunmap(page);
2033 return rc;
2034}
2035
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002036static struct cifs_writedata *
2037wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2038 pgoff_t end, pgoff_t *index,
2039 unsigned int *found_pages)
2040{
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002041 struct cifs_writedata *wdata;
2042
2043 wdata = cifs_writedata_alloc((unsigned int)tofind,
2044 cifs_writev_complete);
2045 if (!wdata)
2046 return NULL;
2047
Jan Kara9c19a9c2017-11-15 17:35:26 -08002048 *found_pages = find_get_pages_range_tag(mapping, index, end,
2049 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002050 return wdata;
2051}
2052
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002053static unsigned int
2054wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2055 struct address_space *mapping,
2056 struct writeback_control *wbc,
2057 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2058{
2059 unsigned int nr_pages = 0, i;
2060 struct page *page;
2061
2062 for (i = 0; i < found_pages; i++) {
2063 page = wdata->pages[i];
2064 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002065 * At this point we hold neither the i_pages lock nor the
2066 * page lock: the page may be truncated or invalidated
2067 * (changing page->mapping to NULL), or even swizzled
2068 * back from swapper_space to tmpfs file mapping
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002069 */
2070
2071 if (nr_pages == 0)
2072 lock_page(page);
2073 else if (!trylock_page(page))
2074 break;
2075
2076 if (unlikely(page->mapping != mapping)) {
2077 unlock_page(page);
2078 break;
2079 }
2080
2081 if (!wbc->range_cyclic && page->index > end) {
2082 *done = true;
2083 unlock_page(page);
2084 break;
2085 }
2086
2087 if (*next && (page->index != *next)) {
2088 /* Not next consecutive page */
2089 unlock_page(page);
2090 break;
2091 }
2092
2093 if (wbc->sync_mode != WB_SYNC_NONE)
2094 wait_on_page_writeback(page);
2095
2096 if (PageWriteback(page) ||
2097 !clear_page_dirty_for_io(page)) {
2098 unlock_page(page);
2099 break;
2100 }
2101
2102 /*
2103 * This actually clears the dirty bit in the radix tree.
2104 * See cifs_writepage() for more commentary.
2105 */
2106 set_page_writeback(page);
2107 if (page_offset(page) >= i_size_read(mapping->host)) {
2108 *done = true;
2109 unlock_page(page);
2110 end_page_writeback(page);
2111 break;
2112 }
2113
2114 wdata->pages[i] = page;
2115 *next = page->index + 1;
2116 ++nr_pages;
2117 }
2118
2119 /* reset index to refind any pages skipped */
2120 if (nr_pages == 0)
2121 *index = wdata->pages[0]->index + 1;
2122
2123 /* put any pages we aren't going to use */
2124 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002125 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002126 wdata->pages[i] = NULL;
2127 }
2128
2129 return nr_pages;
2130}
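/*
 * Editor's note: wdata_prepare_pages() keeps only a usable prefix of the
 * pages found: same mapping, consecutive indexes, still dirty, not past
 * EOF. The first page is locked unconditionally; later pages use
 * trylock_page() so a busy page simply ends the batch instead of risking
 * a deadlock.
 */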
2131
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002132static int
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002133wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2134 struct address_space *mapping, struct writeback_control *wbc)
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002135{
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002136 int rc;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002137 struct TCP_Server_Info *server =
2138 tlink_tcon(wdata->cfile->tlink)->ses->server;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002139
2140 wdata->sync_mode = wbc->sync_mode;
2141 wdata->nr_pages = nr_pages;
2142 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002143 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002144 wdata->tailsz = min(i_size_read(mapping->host) -
2145 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002146 (loff_t)PAGE_SIZE);
2147 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002148 wdata->pid = wdata->cfile->pid;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002149
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002150 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2151 if (rc)
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002152 return rc;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002153
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002154 if (wdata->cfile->invalidHandle)
2155 rc = -EAGAIN;
2156 else
2157 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002158
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002159 return rc;
2160}
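/*
 * Editor's note (illustrative sizes, assuming 4 KiB pages): for a 3-page
 * batch whose last page has only 100 bytes below EOF, the math above gives
 * tailsz = 100 and bytes = (3 - 1) * 4096 + 100 = 8292.
 */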
2161
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002163 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164{
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002165 struct inode *inode = mapping->host;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct cifsFileInfo *cfile = NULL;
	int rc = 0;
	int saved_rc = 0;
	unsigned int xid;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	xid = get_xid();
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize;
		pgoff_t next = 0, tofind, saved_index = index;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;
		int get_file_rc = 0;

		if (cfile)
			cifsFileInfo_put(cfile);

		rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);

		/* in case of an error store it to return later */
		if (rc)
			get_file_rc = rc;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, credits);
		if (rc != 0) {
			done = true;
			break;
		}

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			done = true;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits_on_stack;
		wdata->cfile = cfile;
		cfile = NULL;

		if (!wdata->cfile) {
			cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
				 get_file_rc);
			if (is_retryable_error(get_file_rc))
				rc = get_file_rc;
			else
				rc = -EBADF;
		} else
			rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (is_retryable_error(rc))
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (!is_retryable_error(rc))
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		/* Return immediately if we received a signal during writing */
		if (is_interrupt_error(rc)) {
			done = true;
			break;
		}

		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (saved_rc != 0)
		rc = saved_rc;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	if (cfile)
		cifsFileInfo_put(cfile);
	free_xid(xid);
	return rc;
}
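
/*
 * A summary sketch of the credit flow in the loop above (descriptive,
 * mirrors the code rather than adding behavior). Credits taken from
 * wait_mtu_credits() must end up in exactly one place:
 *
 *	rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
 *					   &wsize, credits);
 *	wdata->credits = credits_on_stack;	- travel with the request
 *		or
 *	add_credits_and_wake_if(server, credits, 0);	- returned on error
 *
 * Every early exit (allocation failure, no pages found, nothing to
 * write, failed send) releases the credits before dropping the wdata
 * reference; otherwise the server's transfer window would leak.
 */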

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/*
		 * This is probably better than directly calling
		 * partialpage_write, since here the file handle is
		 * already known and we might as well leverage it.
		 */
		/* BB check if anything else is missing out of ppw,
		   such as updating the last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
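
/*
 * How cifs_write_end() picks its path (illustrative restatement of the
 * branches above, not new behavior): if the page ends up uptodate, the
 * copied data is simply left dirty in the page cache for later
 * writeback; if not, only the copied bytes are written synchronously
 * through the open handle at page_data + (pos & (PAGE_SIZE - 1)),
 * avoiding a read-modify-write of the whole page. The i_size update at
 * the end extends the in-core file size when the write went past EOF.
 */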

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	return rc;
}
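
/*
 * The two fsync variants above differ only in cache handling (summary;
 * the strict variant is the one wired up for strict cache mounts): both
 * flush dirty pages with file_write_and_wait_range() and then issue the
 * protocol-level flush unless the NOSSYNC mount flag is set, but
 * cifs_strict_fsync() additionally zaps the page cache when the client
 * holds no read (oplock/lease) caching, so subsequent reads refetch
 * fresh data from the server.
 */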

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
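
/*
 * Worked example for get_numpages() (illustrative numbers): with a
 * negotiated wsize of 64KiB, a 100000-byte request is clamped to
 * clen = min(100000, 65536) = 65536 and needs
 * DIV_ROUND_UP(65536, 4096) = 16 pages on a 4KiB-page system; the
 * remaining 34464 bytes are picked up by the next pass of the caller's
 * loop.
 */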

static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}

static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
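
/*
 * Example of the short-copy case above (illustrative): a page is asked
 * for bytes = min(cur_len, PAGE_SIZE) = 4096, but copy_page_from_iter()
 * returns 3000 because part of the user buffer was unmapped. The loop
 * stops there, cur_len = save_len - cur_len counts only the bytes
 * actually copied, and *num_pages = i + 1 lets the caller free the
 * untouched tail pages before sending a shorter write.
 */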

static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
		  struct cifs_aio_ctx *ctx)
{
	unsigned int wsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/*
		 * Wait for credits to resend this wdata.
		 * Note: we are attempting to resend the whole wdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, wdata->bytes,
							   &wsize, &credits);
			if (rc)
				goto fail;

			if (wsize < wdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (wsize < wdata->bytes);
		wdata->credits = credits;

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		/* If the write was successfully sent, we are done */
		if (!rc) {
			list_add_tail(&wdata->list, wdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &wdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	return rc;
}
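
/*
 * Why the resend waits in a loop (summary of the code above): the whole
 * wdata is resent as a single request, so the function must hold enough
 * credits for wdata->bytes at once. If wait_mtu_credits() grants fewer
 * (wsize < wdata->bytes), the partial grant is returned to the pool and
 * the thread sleeps one second before asking again, rather than
 * splitting the request into segments.
 */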

static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	unsigned int xid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;
	xid = get_xid();

	do {
		unsigned int wsize;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;

		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, wsize);

		if (ctx->direct_io) {
			ssize_t result;

			result = iov_iter_get_pages_alloc(
				from, &pagevec, cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					 "direct_writev couldn't get user pages "
					 "(rc=%zd) iter type %d iov_offset %zd "
					 "count %zd\n",
					 result, from->type,
					 from->iov_offset, from->count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(from, cur_len);

			nr_pages =
				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;

			wdata = cifs_writedata_direct_alloc(pagevec,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			wdata->page_offset = start;
			wdata->tailsz =
				nr_pages > 1 ?
					cur_len - (PAGE_SIZE - start) -
					(nr_pages - 2) * PAGE_SIZE :
					cur_len;
		} else {
			nr_pages = get_numpages(wsize, len, &cur_len);
			wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
			if (rc) {
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			num_pages = nr_pages;
			rc = wdata_fill_from_iovec(
				wdata, from, &cur_len, &num_pages);
			if (rc) {
				for (i = 0; i < nr_pages; i++)
					put_page(wdata->pages[i]);
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			/*
			 * Bring nr_pages down to the number of pages we
			 * actually used, and free any pages that we didn't use.
			 */
			for ( ; nr_pages > num_pages; nr_pages--)
				put_page(wdata->pages[nr_pages - 1]);

			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->credits = credits_on_stack;
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		if (rc) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	free_xid(xid);
	return rc;
}
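
/*
 * Worked example for the direct-I/O tail computation above (illustrative
 * numbers, 4KiB pages): a 10000-byte slice whose first byte sits at page
 * offset start = 1000 pins nr_pages = (10000 + 1000 + 4095) / 4096 = 3
 * pages; the first page carries 4096 - 1000 = 3096 bytes, the middle
 * page a full 4096, and tailsz = 10000 - 3096 - (3 - 2) * 4096 = 2808.
 * In the buffered branch the data always starts at page offset 0, so
 * tailsz is simply cur_len - (nr_pages - 1) * PAGE_SIZE.
 */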

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				if (ctx->direct_io)
					rc = cifs_resend_wdata(
						wdata, &tmp_list, ctx);
				else {
					iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

					rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);

					kref_put(&wdata->refcount,
						cifs_uncached_writedata_release);
				}

				list_splice(&tmp_list, &ctx->list);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	cifs_stats_bytes_written(tcon, ctx->total_len);
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
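
/*
 * Collection flow (summary sketch of the interplay above): each
 * completed wdata re-enters collect_uncached_write_data(), which walks
 * ctx->list under ctx->aio_mutex:
 *
 *	try_wait_for_completion(&wdata->done) fails  - some send is still
 *		in flight; unlock and let its completion re-enter here
 *	wdata->result == -EAGAIN                     - resend and restart
 *	otherwise                                    - accumulate total_len
 *
 * Only the invocation that drains the whole list reports ctx->rc,
 * either via iocb->ki_complete() for asynchronous callers or
 * complete(&ctx->done) for synchronous ones.
 */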

static ssize_t __cifs_writev(
	struct kiocb *iocb, struct iov_iter *from, bool direct)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	size_t len = iov_iter_count(from);
	int rc;

	/*
	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
	 * In this case, fall back to the non-direct write function.
	 * This could be improved by getting pages directly in ITER_KVEC.
	 */
	if (direct && from->type & ITER_KVEC) {
		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
		direct = false;
	}

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	if (direct) {
		ctx->direct_io = true;
		ctx->iter = *from;
		ctx->len = len;
	} else {
		rc = setup_aio_ctx_iter(ctx, from, WRITE);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
	}

	/* grab a lock here because write response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}
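
/*
 * Synchronous vs. asynchronous completion above (summary): for a sync
 * kiocb the caller blocks in wait_for_completion_killable() and returns
 * the byte count; for an async one, __cifs_writev() returns
 * -EIOCBQUEUED immediately and collect_uncached_write_data() later
 * finishes the iocb through ki_complete(). The ctx refcount keeps the
 * context alive until the last in-flight wdata drops its reference.
 */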

ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, true);
}

ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}

static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}

ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		    && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because flushing may cause an error with
	 * mandatory locks on those pages even when none applies to the
	 * region from pos to pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
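
/*
 * Decision table for strict-cache writes (restating the branches above):
 *
 *	write caching (oplock/lease) held, POSIX brlocks usable
 *		- generic_file_write_iter(): page cache, no lock check
 *	write caching held, mandatory byte-range locks in force
 *		- cifs_writev(): page cache, but checked against brlocks
 *	no write caching
 *		- cifs_user_writev(): uncached, sent straight to the
 *		  server; any read-level cache is then zapped
 */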

static struct cifs_readdata *
cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (rdata != NULL) {
		rdata->pages = pages;
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct page **pages =
		kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	struct cifs_readdata *ret = NULL;

	if (pages) {
		ret = cifs_readdata_direct_alloc(pages, complete);
		if (!ret)
			kfree(pages);
	}

	return ret;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kvfree(rdata->pages);
	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		unsigned int nr_page_failed = i;

		for (i = 0; i < nr_page_failed; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
	}
	cifs_readdata_release(refcount);
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata: the readdata response with list of pages holding data
 * @iter: destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written;

		if (unlikely(iov_iter_is_pipe(iter))) {
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}

static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	collect_uncached_read_data(rdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

static int
uncached_fill_pages(struct TCP_Server_Info *server,
		    struct cifs_readdata *rdata, struct iov_iter *iter,
		    unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;
		unsigned int segment_size = rdata->pagesz;

		if (i == 0)
			segment_size -= page_offset;
		else
			page_offset = 0;

		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		n = len;
		if (len >= segment_size)
			/* enough data to fill the page */
			n = segment_size;
		else
			rdata->tailsz = len;
		len -= n;

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
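
/*
 * Example of the fill logic above (illustrative, 4KiB pages): a
 * 6000-byte response into an rdata with page_offset = 0 fills page 0
 * completely (n = 4096) and leaves len = 1904 for page 1, so
 * tailsz = 1904; any further pages are released ("no need to hold page
 * hostage") and nr_pages shrinks accordingly. For direct I/O the first
 * page may start at a nonzero page_offset, which only shortens
 * segment_size for i == 0.
 */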

static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	return uncached_fill_pages(server, rdata, NULL, len);
}

static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata,
			      struct iov_iter *iter)
{
	return uncached_fill_pages(server, rdata, iter, iter->count);
}

static int cifs_resend_rdata(struct cifs_readdata *rdata,
			     struct list_head *rdata_list,
			     struct cifs_aio_ctx *ctx)
{
	unsigned int rsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/*
		 * Wait for credits to resend this rdata.
		 * Note: we are attempting to resend the whole rdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, rdata->bytes,
							   &rsize, &credits);

			if (rc)
				goto fail;

			if (rsize < rdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (rsize < rdata->bytes);
		rdata->credits = credits;

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		/* If the read was successfully sent, we are done */
		if (!rc) {
			/* Add to aio pending list */
			list_add_tail(&rdata->list, rdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &rdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	return rc;
}

static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize;
	struct cifs_credits credits_on_stack;
	struct cifs_credits *credits = &credits_on_stack;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	struct iov_iter direct_iov = ctx->iter;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if (ctx->direct_io)
		iov_iter_advance(&direct_iov, offset - ctx->pos);

	do {
		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);

		if (ctx->direct_io) {
			ssize_t result;

			result = iov_iter_get_pages_alloc(
					&direct_iov, &pagevec,
					cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					 "couldn't get user pages (rc=%zd)"
					 " iter type %d"
					 " iov_offset %zd count %zd\n",
					 result, direct_iov.type,
					 direct_iov.iov_offset,
					 direct_iov.count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(&direct_iov, cur_len);

			rdata = cifs_readdata_direct_alloc(
					pagevec, cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			npages = (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
			rdata->page_offset = start;
			rdata->tailsz = npages > 1 ?
				cur_len - (PAGE_SIZE - start) -
				(npages - 2) * PAGE_SIZE :
				cur_len;
		} else {
			npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
			/* allocate a readdata struct */
			rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			rc = cifs_read_allocate_pages(rdata, npages);
			if (rc) {
				kvfree(rdata->pages);
				kfree(rdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rdata->tailsz = PAGE_SIZE;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
		rdata->credits = credits_on_stack;
		rdata->ctx = ctx;
		kref_get(&ctx->refcount);

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);

		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		if (rc) {
			add_credits_and_wake_if(server, &rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN) {
				iov_iter_revert(&direct_iov, cur_len);
				continue;
			}
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003563 offset += cur_len;
3564 len -= cur_len;
3565 } while (len > 0);
3566
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003567 return rc;
3568}
3569
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003570static void
3571collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003572{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003573 struct cifs_readdata *rdata, *tmp;
3574 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003575 struct cifs_sb_info *cifs_sb;
3576 struct cifs_tcon *tcon;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003577 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003578
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003579 tcon = tlink_tcon(ctx->cfile->tlink);
3580 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003581
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003582 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003583
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003584 if (list_empty(&ctx->list)) {
3585 mutex_unlock(&ctx->aio_mutex);
3586 return;
3587 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003588
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003589 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003590 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003591again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003592 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003593 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003594 if (!try_wait_for_completion(&rdata->done)) {
3595 mutex_unlock(&ctx->aio_mutex);
3596 return;
3597 }
3598
3599 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003600 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003601 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003602 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003603
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003604 list_del_init(&rdata->list);
3605 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003606
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003607 /*
3608 * Got a part of data and then reconnect has
3609 * happened -- fill the buffer and continue
3610 * reading.
3611 */
3612 if (got_bytes && got_bytes < rdata->bytes) {
Long Li6e6e2b82018-10-31 22:13:09 +00003613 rc = 0;
3614 if (!ctx->direct_io)
3615 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003616 if (rc) {
3617 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003618 cifs_uncached_readdata_release);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003619 continue;
3620 }
3621 }
3622
Long Li6e6e2b82018-10-31 22:13:09 +00003623 if (ctx->direct_io) {
3624 /*
3625 * Re-use rdata as this is a
3626 * direct I/O
3627 */
3628 rc = cifs_resend_rdata(
3629 rdata,
3630 &tmp_list, ctx);
3631 } else {
3632 rc = cifs_send_async_read(
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003633 rdata->offset + got_bytes,
3634 rdata->bytes - got_bytes,
3635 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003636 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003637
Long Li6e6e2b82018-10-31 22:13:09 +00003638 kref_put(&rdata->refcount,
3639 cifs_uncached_readdata_release);
3640 }
3641
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003642 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003643
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003644 goto again;
3645 } else if (rdata->result)
3646 rc = rdata->result;
Long Li6e6e2b82018-10-31 22:13:09 +00003647 else if (!ctx->direct_io)
Jeff Layton1c892542012-05-16 07:13:17 -04003648 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003649
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003650 /* if there was a short read -- discard anything left */
3651 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3652 rc = -ENODATA;
Long Li6e6e2b82018-10-31 22:13:09 +00003653
3654 ctx->total_len += rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003655 }
3656 list_del_init(&rdata->list);
3657 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003659
Jérôme Glisse13f59382019-04-10 15:37:47 -04003660 if (!ctx->direct_io)
Long Li6e6e2b82018-10-31 22:13:09 +00003661 ctx->total_len = ctx->len - iov_iter_count(to);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003662
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003663 /* mask nodata case */
3664 if (rc == -ENODATA)
3665 rc = 0;
3666
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003667 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3668
3669 mutex_unlock(&ctx->aio_mutex);
3670
3671 if (ctx->iocb && ctx->iocb->ki_complete)
3672 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3673 else
3674 complete(&ctx->done);
3675}
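
/*
 * Note the error handling above: a short read (got_bytes < bytes) is
 * turned into -ENODATA to stop the collection, and -ENODATA is then
 * masked back to 0 so a read that runs into EOF reports the bytes
 * actually copied instead of an error. The final ctx->rc is that byte
 * count or a negative errno, delivered via ki_complete() for async
 * callers and complete() for synchronous ones.
 */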
3676
Long Li6e6e2b82018-10-31 22:13:09 +00003677static ssize_t __cifs_readv(
3678 struct kiocb *iocb, struct iov_iter *to, bool direct)
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003679{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003680 size_t len;
Long Li6e6e2b82018-10-31 22:13:09 +00003681 struct file *file = iocb->ki_filp;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003682 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003683 struct cifsFileInfo *cfile;
Long Li6e6e2b82018-10-31 22:13:09 +00003684 struct cifs_tcon *tcon;
3685 ssize_t rc, total_read = 0;
3686 loff_t offset = iocb->ki_pos;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003687 struct cifs_aio_ctx *ctx;
3688
Long Li6e6e2b82018-10-31 22:13:09 +00003689 /*
3690 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
3691 * fall back to data copy read path
3692 * this could be improved by getting pages directly in ITER_KVEC
3693 */
3694 if (direct && to->type & ITER_KVEC) {
3695 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3696 direct = false;
3697 }
3698
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003699 len = iov_iter_count(to);
3700 if (!len)
3701 return 0;
3702
3703 cifs_sb = CIFS_FILE_SB(file);
3704 cfile = file->private_data;
3705 tcon = tlink_tcon(cfile->tlink);
3706
3707 if (!tcon->ses->server->ops->async_readv)
3708 return -ENOSYS;
3709
3710 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3711 cifs_dbg(FYI, "attempting read on write only file instance\n");
3712
3713 ctx = cifs_aio_ctx_alloc();
3714 if (!ctx)
3715 return -ENOMEM;
3716
3717 ctx->cfile = cifsFileInfo_get(cfile);
3718
3719 if (!is_sync_kiocb(iocb))
3720 ctx->iocb = iocb;
3721
David Howells00e23702018-10-22 13:07:28 +01003722 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003723 ctx->should_dirty = true;
3724
Long Li6e6e2b82018-10-31 22:13:09 +00003725 if (direct) {
3726 ctx->pos = offset;
3727 ctx->direct_io = true;
3728 ctx->iter = *to;
3729 ctx->len = len;
3730 } else {
3731 rc = setup_aio_ctx_iter(ctx, to, READ);
3732 if (rc) {
3733 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3734 return rc;
3735 }
3736 len = ctx->len;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003737 }
3738
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003739	/* grab a lock here because read response handlers can access ctx */
3740 mutex_lock(&ctx->aio_mutex);
3741
3742 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3743
3744 /* if at least one read request send succeeded, then reset rc */
3745 if (!list_empty(&ctx->list))
3746 rc = 0;
3747
3748 mutex_unlock(&ctx->aio_mutex);
3749
3750 if (rc) {
3751 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3752 return rc;
3753 }
3754
3755 if (!is_sync_kiocb(iocb)) {
3756 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3757 return -EIOCBQUEUED;
3758 }
3759
3760 rc = wait_for_completion_killable(&ctx->done);
3761 if (rc) {
3762 mutex_lock(&ctx->aio_mutex);
3763 ctx->rc = rc = -EINTR;
3764 total_read = ctx->total_len;
3765 mutex_unlock(&ctx->aio_mutex);
3766 } else {
3767 rc = ctx->rc;
3768 total_read = ctx->total_len;
3769 }
3770
3771 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3772
Al Viro0165e812014-02-04 14:19:48 -05003773 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003774 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003775 return total_read;
3776 }
3777 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003778}
3779
Long Li6e6e2b82018-10-31 22:13:09 +00003780ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3781{
3782 return __cifs_readv(iocb, to, true);
3783}
3784
3785ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3786{
3787 return __cifs_readv(iocb, to, false);
3788}
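
/*
 * From user space both entry points are reached through ordinary
 * read(2)/preadv(2) on a CIFS file; with O_DIRECT the iocb is routed to
 * cifs_direct_readv() (the dispatch lives in cifsfs.c). A minimal
 * user-space sketch, assuming /mnt/cifs is a CIFS mount; error handling
 * is omitted and O_DIRECT generally wants an aligned buffer:
 *
 *	char *buf;
 *	int fd = open("/mnt/cifs/file", O_RDONLY | O_DIRECT);
 *
 *	posix_memalign((void **)&buf, 4096, 65536);
 *	ssize_t n = read(fd, buf, 65536);
 */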
3789
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003790ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003791cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003792{
Al Viro496ad9a2013-01-23 17:07:38 -05003793 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003794 struct cifsInodeInfo *cinode = CIFS_I(inode);
3795 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3796 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3797 iocb->ki_filp->private_data;
3798 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3799 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003800
3801 /*
3802 * In strict cache mode we need to read from the server all the time
3803 * if we don't have level II oplock because the server can delay mtime
3804 * change - so we can't make a decision about inode invalidating.
3805	 * And we can also fail with page reading if there are mandatory locks
3806 * on pages affected by this read but not on the region from pos to
3807 * pos+len-1.
3808 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003809 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003810 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003811
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003812 if (cap_unix(tcon->ses) &&
3813 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3814 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003815 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003816
3817 /*
3818 * We need to hold the sem to be sure nobody modifies lock list
3819 * with a brlock that prevents reading.
3820 */
3821 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003822 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003823 tcon->ses->server->vals->shared_lock_type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003824 0, NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003825 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003826 up_read(&cinode->lock_sem);
3827 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003828}
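
/*
 * In short, cifs_strict_readv() picks one of three paths:
 * 1) no level II oplock (no read cache) -> uncached cifs_user_readv();
 * 2) Unix extensions with the fcntl capability and POSIX byte-range
 *    locks -> generic_file_read_iter() straight through the page cache;
 * 3) otherwise, the page cache is used only after verifying that no
 *    mandatory brlock conflicts with the requested range.
 */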
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003830static ssize_t
3831cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832{
3833 int rc = -EACCES;
3834 unsigned int bytes_read = 0;
3835 unsigned int total_read;
3836 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003837 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003839 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003840 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003841 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003842 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003843 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003844 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003845 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003846 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003848 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003849 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003850
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003851 /* FIXME: set up handlers for larger reads and/or convert to async */
3852 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3853
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303855 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003856 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303857 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003859 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003860 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003861 server = tcon->ses->server;
3862
3863 if (!server->ops->sync_read) {
3864 free_xid(xid);
3865 return -ENOSYS;
3866 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003868 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3869 pid = open_file->pid;
3870 else
3871 pid = current->tgid;
3872
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003874 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003876 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3877 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003878 do {
3879 current_read_size = min_t(uint, read_size - total_read,
3880 rsize);
3881 /*
3882			 * For Windows ME and 9x we do not want to request more
3883			 * than it negotiated, since it will refuse the read
3884			 * then.
3885 */
3886 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003887 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003888 current_read_size = min_t(uint,
3889 current_read_size, CIFSMaxBufSize);
3890 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003891 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003892 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 if (rc != 0)
3894 break;
3895 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003896 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003897 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003898 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003899 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003900 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003901 &bytes_read, &cur_offset,
3902 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003903 } while (rc == -EAGAIN);
3904
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 if (rc || (bytes_read == 0)) {
3906 if (total_read) {
3907 break;
3908 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003909 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 return rc;
3911 }
3912 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003913 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003914 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915 }
3916 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003917 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003918 return total_read;
3919}
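
/*
 * cifs_read() is the synchronous fallback used by cifs_readpage_worker()
 * below: each loop iteration issues one blocking sync_read capped at
 * rsize (itself capped at CIFSMaxBufSize). For example, a 16 KiB request
 * with a 4 KiB rsize takes four round trips, advancing *offset by
 * bytes_read each time and retrying an iteration on -EAGAIN.
 */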
3920
Jeff Laytonca83ce32011-04-12 09:13:44 -04003921/*
3922 * If the page is mmap'ed into a process' page tables, then we need to make
3923 * sure that it doesn't change while being written back.
3924 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05303925static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08003926cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003927{
3928 struct page *page = vmf->page;
3929
3930 lock_page(page);
3931 return VM_FAULT_LOCKED;
3932}
3933
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003934static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003935 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003936 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003937 .page_mkwrite = cifs_page_mkwrite,
3938};
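
/*
 * cifs_page_mkwrite() only locks the faulting page so it cannot change
 * while being written back. A minimal user-space sketch of what invokes
 * it (fd is assumed to be an open file on a CIFS mount; error handling
 * omitted):
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	((char *)p)[0] = 'x';
 *
 * The store takes a write fault and cifs_page_mkwrite() returns
 * VM_FAULT_LOCKED with the page held locked.
 */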
3939
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003940int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3941{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003942 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003943 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003944
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003945 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003946
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003947 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003948 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003949 if (!rc)
3950 rc = generic_file_mmap(file, vma);
3951 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003952 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003953
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003954 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003955 return rc;
3956}
3957
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3959{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960 int rc, xid;
3961
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003962 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003963
Jeff Laytonabab0952010-02-12 07:44:18 -05003964 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003965 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003966 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3967 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003968 if (!rc)
3969 rc = generic_file_mmap(file, vma);
3970 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003971 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003972
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003973 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 return rc;
3975}
3976
Jeff Layton0471ca32012-05-16 07:13:16 -04003977static void
3978cifs_readv_complete(struct work_struct *work)
3979{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003980 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003981 struct cifs_readdata *rdata = container_of(work,
3982 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003983
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003984 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003985 for (i = 0; i < rdata->nr_pages; i++) {
3986 struct page *page = rdata->pages[i];
3987
Jeff Layton0471ca32012-05-16 07:13:16 -04003988 lru_cache_add_file(page);
3989
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003990 if (rdata->result == 0 ||
3991 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003992 flush_dcache_page(page);
3993 SetPageUptodate(page);
3994 }
3995
3996 unlock_page(page);
3997
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003998 if (rdata->result == 0 ||
3999 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04004000 cifs_readpage_to_fscache(rdata->mapping->host, page);
4001
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004002 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004003
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004004 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004005 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04004006 }
Jeff Layton6993f742012-05-16 07:13:17 -04004007 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04004008}
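
/*
 * Completion above hands every page of the rdata over to the page cache:
 * each page is added to the file LRU; if the read succeeded (or got some
 * data before an -EAGAIN that will be retried) it is flushed, marked
 * uptodate and offered to fscache; finally it is unlocked and released,
 * so the rdata no longer owns any page data.
 */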
4009
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004010static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004011readpages_fill_pages(struct TCP_Server_Info *server,
4012 struct cifs_readdata *rdata, struct iov_iter *iter,
4013 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004014{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004015 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004016 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004017 u64 eof;
4018 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004019 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07004020 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004021
4022 /* determine the eof that the server (probably) has */
4023 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004024 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05004025 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004026
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004027 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004028 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004029 for (i = 0; i < nr_pages; i++) {
4030 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07004031 unsigned int to_read = rdata->pagesz;
4032 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004033
Long Li1dbe3462018-05-30 12:47:55 -07004034 if (i == 0)
4035 to_read -= page_offset;
4036 else
4037 page_offset = 0;
4038
4039 n = to_read;
4040
4041 if (len >= to_read) {
4042 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07004043 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004044 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07004045 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05004046 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07004047 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004048 } else if (page->index > eof_index) {
4049 /*
4050 * The VFS will not try to do readahead past the
4051 * i_size, but it's possible that we have outstanding
4052 * writes with gaps in the middle and the i_size hasn't
4053 * caught up yet. Populate those with zeroed out pages
4054 * to prevent the VFS from repeatedly attempting to
4055 * fill them until the writes are flushed.
4056 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004057 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004058 lru_cache_add_file(page);
4059 flush_dcache_page(page);
4060 SetPageUptodate(page);
4061 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004062 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004063 rdata->pages[i] = NULL;
4064 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004065 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004066 } else {
4067 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004068 lru_cache_add_file(page);
4069 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004070 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004071 rdata->pages[i] = NULL;
4072 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004073 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004074 }
Jeff Layton8321fec2012-09-19 06:22:32 -07004075
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004076 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07004077 result = copy_page_from_iter(
4078 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07004079#ifdef CONFIG_CIFS_SMB_DIRECT
4080 else if (rdata->mr)
4081 result = n;
4082#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004083 else
Long Li1dbe3462018-05-30 12:47:55 -07004084 result = cifs_read_page_from_socket(
4085 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07004086 if (result < 0)
4087 break;
4088
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004089 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004090 }
4091
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004092 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4093 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004094}
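
/*
 * readpages_fill_pages() drains one server response into the rdata's
 * pages from one of three sources: an iov_iter (data that was already
 * copied, e.g. after decryption), an SMB Direct memory registration, or
 * the socket. A partial tail page is zero-filled: if only 1000 bytes
 * remain for a 4096-byte page, bytes 1000..4095 are zeroed and tailsz
 * becomes 1000. Pages beyond the server's likely EOF are zeroed, marked
 * uptodate and dropped from the rdata so the VFS stops refaulting them.
 */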
4095
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004096static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004097cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4098 struct cifs_readdata *rdata, unsigned int len)
4099{
4100 return readpages_fill_pages(server, rdata, NULL, len);
4101}
4102
4103static int
4104cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4105 struct cifs_readdata *rdata,
4106 struct iov_iter *iter)
4107{
4108 return readpages_fill_pages(server, rdata, iter, iter->count);
4109}
4110
4111static int
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004112readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
4113 unsigned int rsize, struct list_head *tmplist,
4114 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
4115{
4116 struct page *page, *tpage;
4117 unsigned int expected_index;
4118 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07004119 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004120
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004121 INIT_LIST_HEAD(tmplist);
4122
Nikolay Borisovf86196e2019-01-03 15:29:02 -08004123 page = lru_to_page(page_list);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004124
4125 /*
4126 * Lock the page and put it in the cache. Since no one else
4127 * should have access to this page, we're safe to simply set
4128 * PG_locked without checking it first.
4129 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004130 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004131 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07004132 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004133
4134 /* give up if we can't stick it in the cache */
4135 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004136 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004137 return rc;
4138 }
4139
4140 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004141 *offset = (loff_t)page->index << PAGE_SHIFT;
4142 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004143 *nr_pages = 1;
4144 list_move_tail(&page->lru, tmplist);
4145
4146 /* now try and add more pages onto the request */
4147 expected_index = page->index + 1;
4148 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4149 /* discontinuity ? */
4150 if (page->index != expected_index)
4151 break;
4152
4153 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004154 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004155 break;
4156
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004157 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07004158 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004159 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004160 break;
4161 }
4162 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004163 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004164 expected_index++;
4165 (*nr_pages)++;
4166 }
4167 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168}
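
/*
 * readpages_get_pages() carves the head of page_list into one contiguous
 * batch, stopping at the first index gap or once another page would push
 * the request past rsize. For example, with a 16 KiB rsize and pending
 * page indexes 7, 8, 9 and 11, the batch covers indexes 7..9 (12 KiB)
 * and stops at the discontinuity; index 11 is picked up by the next pass
 * of the loop in cifs_readpages() below.
 */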
4169
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170static int cifs_readpages(struct file *file, struct address_space *mapping,
4171 struct list_head *page_list, unsigned num_pages)
4172{
Jeff Layton690c5e32011-10-19 15:30:16 -04004173 int rc;
4174 struct list_head tmplist;
4175 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04004176 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004177 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004178 pid_t pid;
Steve French0cb012d2018-10-11 01:01:02 -05004179 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180
Steve French0cb012d2018-10-11 01:01:02 -05004181 xid = get_xid();
Jeff Layton690c5e32011-10-19 15:30:16 -04004182 /*
Suresh Jayaraman566982362010-07-05 18:13:25 +05304183 * Reads as many pages as possible from fscache. Returns -ENOBUFS
4184 * immediately if the cookie is negative
David Howells54afa992013-09-04 17:10:39 +00004185 *
4186 * After this point, every page in the list might have PG_fscache set,
4187 * so we will need to clean that up off of every page we don't use.
Suresh Jayaraman566982362010-07-05 18:13:25 +05304188 */
4189 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4190 &num_pages);
Steve French0cb012d2018-10-11 01:01:02 -05004191 if (rc == 0) {
4192 free_xid(xid);
Jeff Layton690c5e32011-10-19 15:30:16 -04004193 return rc;
Steve French0cb012d2018-10-11 01:01:02 -05004194 }
Suresh Jayaraman566982362010-07-05 18:13:25 +05304195
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004196 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4197 pid = open_file->pid;
4198 else
4199 pid = current->tgid;
4200
Jeff Layton690c5e32011-10-19 15:30:16 -04004201 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004202 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004203
Joe Perchesf96637b2013-05-04 22:12:25 -05004204 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4205 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04004206
4207 /*
4208 * Start with the page at end of list and move it to private
4209 * list. Do the same with any following pages until we hit
4210 * the rsize limit, hit an index discontinuity, or run out of
4211 * pages. Issue the async read and then start the loop again
4212 * until the list is empty.
4213 *
4214 * Note that list order is important. The page_list is in
4215 * the order of declining indexes. When we put the pages in
4216 * the rdata->pages, then we want them in increasing order.
4217 */
4218 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004219 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04004220 loff_t offset;
4221 struct page *page, *tpage;
4222 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004223 struct cifs_credits credits_on_stack;
4224 struct cifs_credits *credits = &credits_on_stack;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004226 if (open_file->invalidHandle) {
4227 rc = cifs_reopen_file(open_file, true);
4228 if (rc == -EAGAIN)
4229 continue;
4230 else if (rc)
4231 break;
4232 }
4233
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004234 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004235 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004236 if (rc)
4237 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238
Jeff Layton690c5e32011-10-19 15:30:16 -04004239 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004240 * Give up immediately if rsize is too small to read an entire
4241 * page. The VFS will fall back to readpage. We should never
4242 * reach this point however since we set ra_pages to 0 when the
4243 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04004244 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004245 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004246 add_credits_and_wake_if(server, credits, 0);
Steve French0cb012d2018-10-11 01:01:02 -05004247 free_xid(xid);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004248 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004251 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
4252 &nr_pages, &offset, &bytes);
4253 if (rc) {
4254 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04004256 }
4257
Jeff Layton0471ca32012-05-16 07:13:16 -04004258 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04004259 if (!rdata) {
4260 /* best to give up if we're out of mem */
4261 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4262 list_del(&page->lru);
4263 lru_cache_add_file(page);
4264 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004265 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04004266 }
4267 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004268 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04004269 break;
4270 }
4271
Jeff Layton6993f742012-05-16 07:13:17 -04004272 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04004273 rdata->mapping = mapping;
4274 rdata->offset = offset;
4275 rdata->bytes = bytes;
4276 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004277 rdata->pagesz = PAGE_SIZE;
Long Li1dbe3462018-05-30 12:47:55 -07004278 rdata->tailsz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07004279 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004280 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004281 rdata->credits = credits_on_stack;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004282
4283 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4284 list_del(&page->lru);
4285 rdata->pages[rdata->nr_pages++] = page;
4286 }
Jeff Layton690c5e32011-10-19 15:30:16 -04004287
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004288 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4289
4290 if (!rc) {
4291 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004292 rc = -EAGAIN;
4293 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004294 rc = server->ops->async_readv(rdata);
4295 }
4296
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004297 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004298 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004299 for (i = 0; i < rdata->nr_pages; i++) {
4300 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04004301 lru_cache_add_file(page);
4302 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004303 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04004305 /* Fallback to the readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04004306 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307 break;
4308 }
Jeff Layton6993f742012-05-16 07:13:17 -04004309
4310 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 }
4312
David Howells54afa992013-09-04 17:10:39 +00004313 /* Any pages that have been shown to fscache but didn't get added to
4314 * the pagecache must be uncached before they get returned to the
4315 * allocator.
4316 */
4317 cifs_fscache_readpages_cancel(mapping->host, page_list);
Steve French0cb012d2018-10-11 01:01:02 -05004318 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319 return rc;
4320}
4321
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01004322/*
4323 * cifs_readpage_worker must be called with the page pinned
4324 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325static int cifs_readpage_worker(struct file *file, struct page *page,
4326 loff_t *poffset)
4327{
4328 char *read_data;
4329 int rc;
4330
Suresh Jayaraman566982362010-07-05 18:13:25 +05304331 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05004332 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304333 if (rc == 0)
4334 goto read_complete;
4335
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336 read_data = kmap(page);
4337 /* for reads over a certain size could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004338
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004339 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004340
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341 if (rc < 0)
4342 goto io_error;
4343 else
Joe Perchesf96637b2013-05-04 22:12:25 -05004344 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004345
Steve French9b9c5be2018-09-22 12:07:06 -05004346 /* we do not want atime to be less than mtime, it broke some apps */
4347 file_inode(file)->i_atime = current_time(file_inode(file));
4348 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4349 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4350 else
4351 file_inode(file)->i_atime = current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004352
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004353 if (PAGE_SIZE > rc)
4354 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355
4356 flush_dcache_page(page);
4357 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304358
4359 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05004360 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304361
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004363
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004365 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004366 unlock_page(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304367
4368read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369 return rc;
4370}
4371
4372static int cifs_readpage(struct file *file, struct page *page)
4373{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004374 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004375 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004376 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004378 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004379
4380 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304381 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004382 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304383 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384 }
4385
Joe Perchesf96637b2013-05-04 22:12:25 -05004386 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00004387 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388
4389 rc = cifs_readpage_worker(file, page, &offset);
4390
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004391 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004392 return rc;
4393}
4394
Steve Frencha403a0a2007-07-26 15:54:16 +00004395static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4396{
4397 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05004398 struct cifs_tcon *tcon =
4399 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00004400
Steve French3afca262016-09-22 18:58:16 -05004401 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004402 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004403 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05004404 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004405 return 1;
4406 }
4407 }
Steve French3afca262016-09-22 18:58:16 -05004408 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004409 return 0;
4410}
4411
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412/* We do not want to update the file size from the server for inodes
4413 open for write, to avoid races with writepage extending
4414 the file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004415 refreshing the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416 but this is tricky to do without racing with writebehind
4417 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004418bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004419{
Steve Frencha403a0a2007-07-26 15:54:16 +00004420 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004421 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004422
Steve Frencha403a0a2007-07-26 15:54:16 +00004423 if (is_inode_writable(cifsInode)) {
4424 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004425 struct cifs_sb_info *cifs_sb;
4426
Steve Frenchc32a0b62006-01-12 14:41:28 -08004427 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004428 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004429 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004430 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004431 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004432 }
4433
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004434 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004435 return true;
Steve French7ba526312007-02-08 18:14:13 +00004436
Steve French4b18f2a2008-04-29 00:06:05 +00004437 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004438 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004439 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440}
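
/*
 * Restating the rule above: a server-reported size is always safe to
 * apply when the inode is not open for write anywhere; when it is, the
 * size is accepted only on a forcedirectio mount (no page cache to
 * corrupt) or when the new end of file lies beyond the cached i_size.
 */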
4441
Nick Piggind9414772008-09-24 11:32:59 -04004442static int cifs_write_begin(struct file *file, struct address_space *mapping,
4443 loff_t pos, unsigned len, unsigned flags,
4444 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445{
Sachin Prabhu466bd312013-09-13 14:11:57 +01004446 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004447 pgoff_t index = pos >> PAGE_SHIFT;
4448 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004449 loff_t page_start = pos & PAGE_MASK;
4450 loff_t i_size;
4451 struct page *page;
4452 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453
Joe Perchesf96637b2013-05-04 22:12:25 -05004454 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04004455
Sachin Prabhu466bd312013-09-13 14:11:57 +01004456start:
Nick Piggin54566b22009-01-04 12:00:53 -08004457 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004458 if (!page) {
4459 rc = -ENOMEM;
4460 goto out;
4461 }
Nick Piggind9414772008-09-24 11:32:59 -04004462
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004463 if (PageUptodate(page))
4464 goto out;
Steve French8a236262007-03-06 00:31:00 +00004465
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004466 /*
4467 * If we write a full page it will be up to date, no need to read from
4468 * the server. If the write is short, we'll end up doing a sync write
4469 * instead.
4470 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004471 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004472 goto out;
4473
4474 /*
4475 * optimize away the read when we have an oplock, and we're not
4476 * expecting to use any of the data we'd be reading in. That
4477 * is, when the page lies beyond the EOF, or straddles the EOF
4478 * and the write will cover all of the existing data.
4479 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004480 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004481 i_size = i_size_read(mapping->host);
4482 if (page_start >= i_size ||
4483 (offset == 0 && (pos + len) >= i_size)) {
4484 zero_user_segments(page, 0, offset,
4485 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004486 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004487 /*
4488 * PageChecked means that the parts of the page
4489 * to which we're not writing are considered up
4490 * to date. Once the data is copied to the
4491 * page, it can be set uptodate.
4492 */
4493 SetPageChecked(page);
4494 goto out;
4495 }
4496 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497
Sachin Prabhu466bd312013-09-13 14:11:57 +01004498 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004499 /*
4500 * might as well read a page, it is fast enough. If we get
4501 * an error, we don't need to return it. cifs_write_end will
4502 * do a sync write instead since PG_uptodate isn't set.
4503 */
4504 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004505 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004506 oncethru = 1;
4507 goto start;
Steve French8a236262007-03-06 00:31:00 +00004508 } else {
4509 /* we could try using another file handle if there is one -
4510 but how would we lock it to prevent close of that handle
4511 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04004512 this will be written out by write_end so is fine */
Steve French8a236262007-03-06 00:31:00 +00004513 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004514out:
4515 *pagep = page;
4516 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517}
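
/*
 * cifs_write_begin() avoids a server round trip where it can: a
 * full-page write needs no read-modify-write; with a read oplock, a page
 * lying past EOF (or one whose existing data the write fully covers) is
 * just zeroed where untouched and flagged PageChecked; otherwise the
 * page is read in at most once (the oncethru flag), and a read failure
 * is tolerated since a short copy falls back to a sync write in
 * cifs_write_end().
 */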
4518
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304519static int cifs_release_page(struct page *page, gfp_t gfp)
4520{
4521 if (PagePrivate(page))
4522 return 0;
4523
4524 return cifs_fscache_release_page(page, gfp);
4525}
4526
Lukas Czernerd47992f2013-05-21 23:17:23 -04004527static void cifs_invalidate_page(struct page *page, unsigned int offset,
4528 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304529{
4530 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4531
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004532 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304533 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4534}
4535
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004536static int cifs_launder_page(struct page *page)
4537{
4538 int rc = 0;
4539 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004540 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004541 struct writeback_control wbc = {
4542 .sync_mode = WB_SYNC_ALL,
4543 .nr_to_write = 0,
4544 .range_start = range_start,
4545 .range_end = range_end,
4546 };
4547
Joe Perchesf96637b2013-05-04 22:12:25 -05004548 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004549
4550 if (clear_page_dirty_for_io(page))
4551 rc = cifs_writepage_locked(page, &wbc);
4552
4553 cifs_fscache_invalidate_page(page, page->mapping->host);
4554 return rc;
4555}
4556
Tejun Heo9b646972010-07-20 22:09:02 +02004557void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04004558{
4559 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4560 oplock_break);
David Howells2b0143b2015-03-17 22:25:59 +00004561 struct inode *inode = d_inode(cfile->dentry);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004562 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004563 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004564 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004565 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04004566
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004567 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
NeilBrown74316202014-07-07 15:16:04 +10004568 TASK_UNINTERRUPTIBLE);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004569
4570 server->ops->downgrade_oplock(server, cinode,
4571 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
4572
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004573 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004574 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05004575 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4576 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004577 cinode->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004578 }
4579
Jeff Layton3bc303c2009-09-21 06:47:50 -04004580 if (inode && S_ISREG(inode->i_mode)) {
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004581 if (CIFS_CACHE_READ(cinode))
Al Viro8737c932009-12-24 06:47:55 -05004582 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00004583 else
Al Viro8737c932009-12-24 06:47:55 -05004584 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004585 rc = filemap_fdatawrite(inode->i_mapping);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004586 if (!CIFS_CACHE_READ(cinode)) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004587 rc = filemap_fdatawait(inode->i_mapping);
4588 mapping_set_error(inode->i_mapping, rc);
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004589 cifs_zap_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004590 }
Joe Perchesf96637b2013-05-04 22:12:25 -05004591 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004592 }
4593
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004594 rc = cifs_push_locks(cfile);
4595 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004596 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004597
Jeff Layton3bc303c2009-09-21 06:47:50 -04004598 /*
4599	 * Releasing a stale oplock after a recent reconnect of the SMB session,
4600	 * using a now-incorrect file handle, is not a data integrity issue. But
4601	 * do not bother sending an oplock release if the session to the server
4602	 * is still disconnected, since the oplock was already released by the server.
4603 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00004604 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004605 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4606 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05004607 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004608 }
Aurelien Aptelb98749c2019-03-29 10:49:12 +01004609 _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004610 cifs_done_oplock_break(cinode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004611}
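
/*
 * The break sequence above: wait out pending writers, downgrade the
 * cached oplock state, drop read caching if mandatory locks make level
 * II caching unsafe, flush dirty pages (and invalidate the mapping once
 * read caching is lost), re-push local byte-range locks to the server,
 * and finally acknowledge the break unless it was cancelled or the
 * session has since disconnected.
 */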
4612
Steve Frenchdca69282013-11-11 16:42:37 -06004613/*
4614 * The presence of cifs_direct_io() in the address space ops vector
4615 * allows open() O_DIRECT flags which would have failed otherwise.
4616 *
4617 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests, so this method should never be called.
4618 * so this method should never be called.
4619 *
4620 * Direct IO is not yet supported in the cached mode.
4621 */
4622static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004623cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004624{
4625 /*
4626 * FIXME
4627 * Eventually need to support direct IO for non forcedirectio mounts
4628 */
4629 return -EINVAL;
4630}
4631
4632
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004633const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004634 .readpage = cifs_readpage,
4635 .readpages = cifs_readpages,
4636 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07004637 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004638 .write_begin = cifs_write_begin,
4639 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004640 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304641 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06004642 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304643 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004644 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004646
4647/*
4648 * cifs_readpages requires the server to support a buffer large enough to
4649 * contain the header plus one complete page of data. Otherwise, we need
4650 * to leave cifs_readpages out of the address space operations.
4651 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004652const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004653 .readpage = cifs_readpage,
4654 .writepage = cifs_writepage,
4655 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004656 .write_begin = cifs_write_begin,
4657 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004658 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304659 .releasepage = cifs_release_page,
4660 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004661 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004662};