/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"

static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /*
                 * GENERIC_ALL is too much permission to request; it can
                 * cause an unnecessary access-denied error on create.
                 */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}
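
/*
 * Worked example (added for illustration, not part of the original file):
 * for the common open modes the conversion above yields
 *
 *      cifs_convert_flags(O_RDONLY) == GENERIC_READ
 *      cifs_convert_flags(O_WRONLY) == GENERIC_WRITE
 *      cifs_convert_flags(O_RDWR)   == (GENERIC_READ | GENERIC_WRITE)
 *
 * and anything else falls through to the explicit FILE_* permission set.
 */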

static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT) {
                posix_flags |= SMB_O_CREAT;
                if (flags & O_EXCL)
                        posix_flags |= SMB_O_EXCL;
        } else if (flags & O_EXCL)
                cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
                         current->comm, current->tgid);

        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}
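
/*
 * Worked example (illustrative, derived from the checks above and the
 * open flag mapping table later in this file):
 *
 *      cifs_get_disposition(O_CREAT | O_EXCL)  == FILE_CREATE
 *      cifs_get_disposition(O_CREAT | O_TRUNC) == FILE_OVERWRITE_IF
 *      cifs_get_disposition(O_CREAT)           == FILE_OPEN_IF
 *      cifs_get_disposition(O_TRUNC)           == FILE_OVERWRITE
 *      cifs_get_disposition(0)                 == FILE_OPEN
 */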

int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cifs_dbg(FYI, "posix open %s\n", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}
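
/*
 * Usage sketch (illustrative only; the real call sites are cifs_open()
 * and cifs_reopen_file() below). "full_path", "inode", "cifs_sb" and
 * "xid" stand for values the caller already holds:
 *
 *      __u32 oplock = 0;
 *      __u16 netfid;
 *      int rc;
 *
 *      rc = cifs_posix_open(full_path, &inode, inode->i_sb,
 *                           cifs_sb->mnt_file_mode, file->f_flags,
 *                           &oplock, &netfid, xid);
 *      if (rc == -EINVAL || rc == -EOPNOTSUPP) {
 *              // server lacks POSIX open support - fall back to the
 *              // cifs_nt_open() path below
 *      }
 */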

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifs_open_parms oparms;

        if (!server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is no direct match for the disposition
 *      FILE_SUPERSEDE (i.e. create whether or not the file exists).
 *      O_CREAT | O_TRUNC is similar, but it truncates an existing
 *      file rather than recreating it with the attributes/metadata
 *      passed in on the open call, as FILE_SUPERSEDE would.
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag,
 *      and the read/write flags match reasonably. O_LARGEFILE is
 *      irrelevant because largefile support is always used by this
 *      client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *      O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        /* O_SYNC also has bit for O_DSYNC so following check picks up either */
        if (f_flags & O_SYNC)
                create_options |= CREATE_WRITE_THROUGH;

        if (f_flags & O_DIRECT)
                create_options |= CREATE_NO_BUFFER;

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = fid;
        oparms.reconnect = false;

        rc = server->ops->open(xid, &oparms, oplock, buf);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, fid);

        if (rc) {
                server->ops->close(xid, tcon, fid);
                if (rc == -ESTALE)
                        rc = -EOPENSTALE;
        }

out:
        kfree(buf);
        return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
        struct cifs_fid_locks *cur;
        bool has_locks = false;

        down_read(&cinode->lock_sem);
        list_for_each_entry(cur, &cinode->llist, llist) {
                if (!list_empty(&cur->locks)) {
                        has_locks = true;
                        break;
                }
        }
        up_read(&cinode->lock_sem);
        return has_locks;
}
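
/*
 * Caller pattern (illustrative; this is exactly how cifs_new_fileinfo()
 * and cifs_reopen_file() below use the helper): a cached read oplock is
 * unsafe while mandatory byte-range locks exist, so it is downgraded:
 *
 *      if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
 *              cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
 *              oplock = 0;
 *      }
 */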

struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file_dentry(file);
        struct inode *inode = d_inode(dentry);
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;
        struct cifs_tcon *tcon = tlink_tcon(tlink);
        struct TCP_Server_Info *server = tcon->ses->server;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
        down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);
        spin_lock_init(&cfile->file_info_lock);

        cifs_sb_active(inode->i_sb);

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        spin_lock(&tcon->open_file_lock);
        if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
                oplock = fid->pending_open->oplock;
        list_del(&fid->pending_open->olist);

        fid->purge_cache = false;
        server->ops->set_fid(cfile, fid, oplock);

        list_add(&cfile->tlist, &tcon->openFileList);
        atomic_inc(&tcon->num_local_opens);

        /* if readable file instance, put it first in the list */
        spin_lock(&cinode->open_file_lock);
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cinode->open_file_lock);
        spin_unlock(&tcon->open_file_lock);

        if (fid->purge_cache)
                cifs_zap_mapping(inode);

        file->private_data = cfile;
        return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file->file_info_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file->file_info_lock);
        return cifs_file;
}
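
/*
 * Reference-counting sketch (illustrative; mirrors the pattern used by
 * cifs_reopen_persistent_handles() below):
 *
 *      cifsFileInfo_get(open_file);    // pin the handle
 *      ...use open_file...
 *      cifsFileInfo_put(open_file);    // drop it; the last put may close
 *                                      // the handle on the server
 */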

/**
 * cifsFileInfo_put - release a reference to file private data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        _cifsFileInfo_put(cifs_file, true);
}

/**
 * _cifsFileInfo_put - release a reference to file private data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock and
 * cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
{
        struct inode *inode = d_inode(cifs_file->dentry);
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;
        bool oplock_break_cancelled;

        spin_lock(&tcon->open_file_lock);

        spin_lock(&cifs_file->file_info_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file->file_info_lock);
                spin_unlock(&tcon->open_file_lock);
                return;
        }
        spin_unlock(&cifs_file->file_info_lock);

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /* store open in pending opens to make sure we don't miss lease break */
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

        /* remove it from the lists */
        spin_lock(&cifsi->open_file_lock);
        list_del(&cifs_file->flist);
        spin_unlock(&cifsi->open_file_lock);
        list_del(&cifs_file->tlist);
        atomic_dec(&tcon->num_local_opens);

        if (list_empty(&cifsi->openFileList)) {
                cifs_dbg(FYI, "closing last open instance for inode %p\n",
                         d_inode(cifs_file->dentry));
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
                cifs_set_oplock_level(cifsi, 0);
        }

        spin_unlock(&tcon->open_file_lock);

        oplock_break_cancelled = wait_oplock_handler ?
                cancel_work_sync(&cifs_file->oplock_break) : false;

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;

                xid = get_xid();
                if (server->ops->close)
                        server->ops->close(xid, tcon, &cifs_file->fid);
                _free_xid(xid);
        }

        if (oplock_break_cancelled)
                cifs_done_oplock_break(cifsi);

        cifs_del_pending_open(&open);

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        up_write(&cifsi->lock_sem);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        cifs_sb_deactive(sb);
        kfree(cifs_file);
}
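
/*
 * Call-site note (added for illustration): per the kernel-doc above, the
 * oplock break handler must not wait for itself, so it drops references
 * with the "false" variant while every other path uses the wrapper:
 *
 *      _cifsFileInfo_put(cfile, false);        // from the oplock break handler
 *      cifsFileInfo_put(cfile);                // everywhere else
 */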

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;

        full_path = build_path_from_dentry(file_dentry(file));
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);

        if (file->f_flags & O_DIRECT &&
            cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
                        file->f_op = &cifs_file_direct_nobrl_ops;
                else
                        file->f_op = &cifs_file_direct_ops;
        }

        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix open succeeded\n");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
                                         tcon->ses->serverName,
                                         tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fallthrough to retry open the old way on network i/o
                 * or DFS errors.
                 */
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        cifs_add_pending_open(&fid, tlink, &open);

        if (!posix_open_ok) {
                if (server->ops->get_lease_key)
                        server->ops->get_lease_key(inode, &fid);

                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc) {
                        cifs_del_pending_open(&open);
                        goto out;
                }
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = INVALID_UID, /* no change */
                        .gid    = INVALID_GID, /* no change */
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
        if (cinode->can_cache_brlcks) {
                /* can cache locks - no need to relock */
                up_read(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        up_read(&cinode->lock_sem);
        return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        char *full_path = NULL;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_open_parms oparms;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = d_inode(cfile->dentry);
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        /*
         * Can not grab rename sem here because various ops, including those
         * that already have the rename sem can end up causing writepage to get
         * called and if the server was down that means we end up here, and we
         * can never tell if the caller already has the rename_sem.
         */
        full_path = build_path_from_dentry(cfile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
                 inode, cfile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &cfile->fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix reopen succeeded\n");
                        oparms.reconnect = true;
                        goto reopen_success;
                }
                /*
                 * fallthrough to retry open the old way on errors, especially
                 * in the reconnect path it is important to retry hard
                 */
        }

        desired_access = cifs_convert_flags(cfile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &cfile->fid);

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = &cfile->fid;
        oparms.reconnect = true;

        /*
         * Can not refresh inode by passing in file_info buf to be returned by
         * ops->open and then calling get_inode_info with returned buf since
         * file might have write behind data that needs to be flushed and server
         * version of file size can be stale. If we knew for sure that inode was
         * not dirty locally we could do this.
         */
        rc = server->ops->open(xid, &oparms, &oplock, NULL);
        if (rc == -ENOENT && oparms.reconnect == false) {
                /* durable handle timeout is expired - open the file again */
                rc = server->ops->open(xid, &oparms, &oplock, NULL);
                /* indicate that we need to relock the file */
                oparms.reconnect = true;
        }

        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
                cifs_dbg(FYI, "oplock: %d\n", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                if (!is_interrupt_error(rc))
                        mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Else we are writing out data to server already and could deadlock if
         * we tried to flush data, and since we do not know if we have data that
         * would invalidate the current end of file on the server we can not go
         * to the server to get the new inode info.
         */

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        server->ops->set_fid(cfile, &cfile->fid, oplock);
        if (oparms.reconnect)
                cifs_relock_file(cfile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
        struct cifsFileInfo *open_file;
        struct list_head *tmp;
        struct list_head *tmp1;
        struct list_head tmp_list;

        if (!tcon->use_persistent || !tcon->need_reopen_files)
                return;

        tcon->need_reopen_files = false;

        cifs_dbg(FYI, "Reopen persistent handles\n");
        INIT_LIST_HEAD(&tmp_list);

        /* list all files open on tree connection, reopen resilient handles */
        spin_lock(&tcon->open_file_lock);
        list_for_each(tmp, &tcon->openFileList) {
                open_file = list_entry(tmp, struct cifsFileInfo, tlist);
                if (!open_file->invalidHandle)
                        continue;
                cifsFileInfo_get(open_file);
                list_add_tail(&open_file->rlist, &tmp_list);
        }
        spin_unlock(&tcon->open_file_lock);

        list_for_each_safe(tmp, tmp1, &tmp_list) {
                open_file = list_entry(tmp, struct cifsFileInfo, rlist);
                if (cifs_reopen_file(open_file, false /* do not flush */))
                        tcon->need_reopen_files = true;
                list_del_init(&open_file->rlist);
                cifsFileInfo_put(open_file);
        }
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cfile->file_info_lock);
        if (server->ops->dir_needs_close(cfile)) {
                cfile->invalidHandle = true;
                spin_unlock(&cfile->file_info_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cfile->file_info_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        lock->flags = flags;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}
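
/*
 * Allocation sketch (illustrative; mirrors how the setlk path calls this
 * helper). "flock", "length" and "server" stand for values the caller
 * holds; the exclusive lock type comes from the dialect-specific value
 * table:
 *
 *      struct cifsLockInfo *lock;
 *
 *      lock = cifs_lock_init(flock->fl_start, length,
 *                            server->vals->exclusive_lock_type,
 *                            flock->fl_flags);
 *      if (!lock)
 *              return -ENOMEM;
 */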

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

#define CIFS_LOCK_OP    0
#define CIFS_READ_OP    1
#define CIFS_WRITE_OP   2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, __u16 flags,
                            struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, int rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
                    server->ops->compare_fids(cfile, cur_cfile)) {
                        /* shared lock prevents write op through the same fid */
                        if (!(li->type & server->vals->shared_lock_type) ||
                            rw_check != CIFS_WRITE_OP)
                                continue;
                }
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                if (rw_check == CIFS_LOCK_OP &&
                    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
                    server->ops->compare_fids(cfile, cur_cfile))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, __u16 flags,
                        struct cifsLockInfo **conf_lock, int rw_check)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 flags, cfile, conf_lock,
                                                 rw_check);
                if (rc)
                        break;
        }

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        down_read(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        flock->fl_flags, &conf_lock,
                                        CIFS_LOCK_OP);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        up_read(&cinode->lock_sem);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        down_write(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, lock->flags, &conf_lock,
                                        CIFS_LOCK_OP);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                up_write(&cinode->lock_sem);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }

        up_write(&cinode->lock_sem);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        down_read(&cinode->lock_sem);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        up_read(&cinode->lock_sem);
        return rc;
}
1111
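/*
 * Editor's sketch (user-space, assumes a file on a CIFS mount with the
 * posix extensions negotiated): cifs_posix_lock_test() above is what
 * ends up serving an F_GETLK query like the one below, answering from
 * the cached brlock state when possible instead of asking the server.
 */
#if 0	/* user-space example, sketch only */
#include <fcntl.h>
#include <stdio.h>

static void probe_lock(int fd)
{
	struct flock fl = {
		.l_type = F_WRLCK,	/* would a write lock succeed? */
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 100,
	};

	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type == F_UNLCK)
		printf("range is free\n");
	else
		printf("range is locked (conflicting pid %d)\n", (int)fl.l_pid);
}
#endif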
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001112/*
1113 * Set the byte-range lock (posix style). Returns:
 1114 * 1) 0, if we set the lock and don't need to send a request to the server;
 1115 * 2) 1, if we need to send a request to the server;
 1116 * 3) <0, if an error occurs while setting the lock.
1117 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001118static int
1119cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1120{
Al Viro496ad9a2013-01-23 17:07:38 -05001121 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001122 int rc = 1;
1123
1124 if ((flock->fl_flags & FL_POSIX) == 0)
1125 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001126
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001127try_again:
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001128 down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001129 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001130 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001131 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001132 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001133
1134 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001135 up_write(&cinode->lock_sem);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001136 if (rc == FILE_LOCK_DEFERRED) {
NeilBrownada5c1d2018-11-30 10:04:08 +11001137 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001138 if (!rc)
1139 goto try_again;
NeilBrowncb03f942018-11-30 10:04:08 +11001140 locks_delete_block(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001141 }
Steve French9ebb3892012-04-01 13:52:54 -05001142 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001143}
1144
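/*
 * Editor's note (illustrative; lock_sem handling elided): the
 * FILE_LOCK_DEFERRED dance above is the standard VFS idiom for
 * blocking posix locks - sleep until fl_blocker is cleared, retry, and
 * detach with locks_delete_block() if interrupted. Its skeleton:
 */
#if 0	/* sketch only */
for (;;) {
	rc = posix_lock_file(file, flock, NULL);
	if (rc != FILE_LOCK_DEFERRED)
		break;			/* granted, or a real error */
	rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
	if (rc) {			/* caught a signal: give up cleanly */
		locks_delete_block(flock);
		break;
	}
}
#endif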
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001145int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001146cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001147{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001148 unsigned int xid;
1149 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001150 struct cifsLockInfo *li, *tmp;
1151 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001152 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001153 LOCKING_ANDX_RANGE *buf, *cur;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001154 static const int types[] = {
1155 LOCKING_ANDX_LARGE_FILES,
1156 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1157 };
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001158 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001159
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001160 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001161 tcon = tlink_tcon(cfile->tlink);
1162
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001163 /*
 1164 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001165 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001166 */
1167 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001168 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001169 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001170 return -EINVAL;
1171 }
1172
Ross Lagerwall92a81092019-01-08 18:30:56 +00001173 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1174 PAGE_SIZE);
1175 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1176 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001177 max_num = (max_buf - sizeof(struct smb_hdr)) /
1178 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001179 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001180 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001181 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001182 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001183 }
1184
1185 for (i = 0; i < 2; i++) {
1186 cur = buf;
1187 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001188 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001189 if (li->type != types[i])
1190 continue;
1191 cur->Pid = cpu_to_le16(li->pid);
1192 cur->LengthLow = cpu_to_le32((u32)li->length);
1193 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1194 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1195 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1196 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001197 stored_rc = cifs_lockv(xid, tcon,
1198 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001199 (__u8)li->type, 0, num,
1200 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001201 if (stored_rc)
1202 rc = stored_rc;
1203 cur = buf;
1204 num = 0;
1205 } else
1206 cur++;
1207 }
1208
1209 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001210 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001211 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001212 if (stored_rc)
1213 rc = stored_rc;
1214 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001215 }
1216
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001217 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001218 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001219 return rc;
1220}
1221
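/*
 * Editor's worked example (sizes approximate, for illustration only):
 * with a typical maxBuf of 16644, an smb_hdr of roughly 32 bytes and a
 * roughly 20-byte LOCKING_ANDX_RANGE, the clamping above gives
 * max_buf = min(16644 - 32, 4096) = 4096 and
 * max_num = (4096 - 32) / 20 = 203 lock elements per LockingAndX
 * request, so hundreds of cached locks are pushed in a handful of
 * round trips rather than one request per lock.
 */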
Jeff Layton3d224622016-05-24 06:27:44 -04001222static __u32
1223hash_lockowner(fl_owner_t owner)
1224{
1225 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1226}
1227
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001228struct lock_to_push {
1229 struct list_head llist;
1230 __u64 offset;
1231 __u64 length;
1232 __u32 pid;
1233 __u16 netfid;
1234 __u8 type;
1235};
1236
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001237static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001238cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001239{
David Howells2b0143b2015-03-17 22:25:59 +00001240 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001241 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001242 struct file_lock *flock;
1243 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001244 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001245 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001246 struct list_head locks_to_send, *el;
1247 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001248 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001249
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001250 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001251
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001252 if (!flctx)
1253 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001254
Jeff Laytone084c1b2015-02-16 14:32:03 -05001255 spin_lock(&flctx->flc_lock);
1256 list_for_each(el, &flctx->flc_posix) {
1257 count++;
1258 }
1259 spin_unlock(&flctx->flc_lock);
1260
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001261 INIT_LIST_HEAD(&locks_to_send);
1262
1263 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001264 * Allocating count locks is enough because no FL_POSIX locks can be
 1265 * added to the list while we are holding cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001266 * protects the locking operations on this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001267 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001268 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001269 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1270 if (!lck) {
1271 rc = -ENOMEM;
1272 goto err_out;
1273 }
1274 list_add_tail(&lck->llist, &locks_to_send);
1275 }
1276
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001277 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001278 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001279 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001280 if (el == &locks_to_send) {
1281 /*
1282 * The list ended. We don't have enough allocated
1283 * structures - something is really wrong.
1284 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001285 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001286 break;
1287 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001288 length = 1 + flock->fl_end - flock->fl_start;
1289 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1290 type = CIFS_RDLCK;
1291 else
1292 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001293 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001294 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001295 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001296 lck->length = length;
1297 lck->type = type;
1298 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001299 }
Jeff Layton6109c852015-01-16 15:05:57 -05001300 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001301
1302 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001303 int stored_rc;
1304
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001305 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001306 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001307 lck->type, 0);
1308 if (stored_rc)
1309 rc = stored_rc;
1310 list_del(&lck->llist);
1311 kfree(lck);
1312 }
1313
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001314out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001315 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001316 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001317err_out:
1318 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1319 list_del(&lck->llist);
1320 kfree(lck);
1321 }
1322 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001323}
1324
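/*
 * Editor's note (illustrative): the two-pass shape above exists
 * because kmalloc(GFP_KERNEL) may sleep and therefore cannot be called
 * under the flc_lock spinlock. Skeleton of the pattern:
 */
#if 0	/* sketch only */
spin_lock(&flctx->flc_lock);
/* pass 1: count the posix locks */
spin_unlock(&flctx->flc_lock);

/* allocate 'count' nodes with GFP_KERNEL; may sleep */

spin_lock(&flctx->flc_lock);
/* pass 2: copy lock details into the preallocated nodes */
spin_unlock(&flctx->flc_lock);
#endif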
1325static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001326cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001327{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001328 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001329 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001330 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001331 int rc = 0;
1332
1333 /* we are going to update can_cache_brlcks here - need a write access */
1334 down_write(&cinode->lock_sem);
1335 if (!cinode->can_cache_brlcks) {
1336 up_write(&cinode->lock_sem);
1337 return rc;
1338 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001339
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001340 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001341 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1342 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001343 rc = cifs_push_posix_locks(cfile);
1344 else
1345 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001346
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001347 cinode->can_cache_brlcks = false;
1348 up_write(&cinode->lock_sem);
1349 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001350}
1351
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001352static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001353cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001354 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001356 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001357 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001358 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001359 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001360 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001361 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001362 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001364 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001365 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001366 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001367 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001368 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001369 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001370 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001371 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001373 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001374 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001375 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001376 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001377 *lock = 1;
1378 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001379 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001380 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001381 *unlock = 1;
1382 /* Check if unlock includes more than one lock range */
1383 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001384 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001385 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001386 *lock = 1;
1387 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001388 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001389 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001390 *lock = 1;
1391 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001392 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001393 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001394 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001396 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001397}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001399static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001400cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001401 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001402{
1403 int rc = 0;
1404 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001405 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1406 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001407 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001408 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001410 if (posix_lck) {
1411 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001412
1413 rc = cifs_posix_lock_test(file, flock);
1414 if (!rc)
1415 return rc;
1416
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001417 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001418 posix_lock_type = CIFS_RDLCK;
1419 else
1420 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001421 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1422 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001423 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001424 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 return rc;
1426 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001427
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001428 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001429 if (!rc)
1430 return rc;
1431
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001432 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001433 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1434 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001435 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001436 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1437 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001438 flock->fl_type = F_UNLCK;
1439 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001440 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1441 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001442 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001443 }
1444
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001445 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001446 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001447 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001448 }
1449
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001450 type &= ~server->vals->exclusive_lock_type;
1451
1452 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1453 type | server->vals->shared_lock_type,
1454 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001455 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001456 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1457 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001458 flock->fl_type = F_RDLCK;
1459 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001460 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1461 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001462 } else
1463 flock->fl_type = F_WRLCK;
1464
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001465 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001466}
1467
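/*
 * Editor's note (illustrative): SMB1 has no dedicated "test lock"
 * operation, so cifs_getlk() above probes by actually taking the lock
 * and, when that succeeds, immediately releasing it. Skeleton of the
 * probe:
 */
#if 0	/* sketch only */
rc = server->ops->mand_lock(xid, cfile, start, len, type, 1, 0, false);
if (rc == 0) {
	/* the range was free: undo the probe and report F_UNLCK */
	server->ops->mand_lock(xid, cfile, start, len, type, 0, 1, false);
	flock->fl_type = F_UNLCK;
}
#endif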
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001468void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001469cifs_move_llist(struct list_head *source, struct list_head *dest)
1470{
1471 struct list_head *li, *tmp;
1472 list_for_each_safe(li, tmp, source)
1473 list_move(li, dest);
1474}
1475
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001476void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001477cifs_free_llist(struct list_head *llist)
1478{
1479 struct cifsLockInfo *li, *tmp;
1480 list_for_each_entry_safe(li, tmp, llist, llist) {
1481 cifs_del_lock_waiters(li);
1482 list_del(&li->llist);
1483 kfree(li);
1484 }
1485}
1486
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001487int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001488cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1489 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001490{
1491 int rc = 0, stored_rc;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001492 static const int types[] = {
1493 LOCKING_ANDX_LARGE_FILES,
1494 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1495 };
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001496 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001497 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001498 LOCKING_ANDX_RANGE *buf, *cur;
1499 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001500 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001501 struct cifsLockInfo *li, *tmp;
1502 __u64 length = 1 + flock->fl_end - flock->fl_start;
1503 struct list_head tmp_llist;
1504
1505 INIT_LIST_HEAD(&tmp_llist);
1506
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001507 /*
 1508 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001509 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001510 */
1511 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001512 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001513 return -EINVAL;
1514
Ross Lagerwall92a81092019-01-08 18:30:56 +00001515 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1516 PAGE_SIZE);
1517 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1518 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001519 max_num = (max_buf - sizeof(struct smb_hdr)) /
1520 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001521 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001522 if (!buf)
1523 return -ENOMEM;
1524
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001525 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001526 for (i = 0; i < 2; i++) {
1527 cur = buf;
1528 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001529 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001530 if (flock->fl_start > li->offset ||
1531 (flock->fl_start + length) <
1532 (li->offset + li->length))
1533 continue;
1534 if (current->tgid != li->pid)
1535 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001536 if (types[i] != li->type)
1537 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001538 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001539 /*
1540 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001541 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001542 */
1543 list_del(&li->llist);
1544 cifs_del_lock_waiters(li);
1545 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001546 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001547 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001548 cur->Pid = cpu_to_le16(li->pid);
1549 cur->LengthLow = cpu_to_le32((u32)li->length);
1550 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1551 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1552 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1553 /*
1554 * We need to save a lock here to let us add it again to
1555 * the file's list if the unlock range request fails on
1556 * the server.
1557 */
1558 list_move(&li->llist, &tmp_llist);
1559 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001560 stored_rc = cifs_lockv(xid, tcon,
1561 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001562 li->type, num, 0, buf);
1563 if (stored_rc) {
1564 /*
1565 * We failed on the unlock range
1566 * request - add all locks from the tmp
1567 * list to the head of the file's list.
1568 */
1569 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001570 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001571 rc = stored_rc;
1572 } else
1573 /*
 1574 * The unlock range request succeeded -
1575 * free the tmp list.
1576 */
1577 cifs_free_llist(&tmp_llist);
1578 cur = buf;
1579 num = 0;
1580 } else
1581 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001582 }
1583 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001584 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001585 types[i], num, 0, buf);
1586 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001587 cifs_move_llist(&tmp_llist,
1588 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001589 rc = stored_rc;
1590 } else
1591 cifs_free_llist(&tmp_llist);
1592 }
1593 }
1594
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001595 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001596 kfree(buf);
1597 return rc;
1598}
1599
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001600static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001601cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001602 bool wait_flag, bool posix_lck, int lock, int unlock,
1603 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001604{
1605 int rc = 0;
1606 __u64 length = 1 + flock->fl_end - flock->fl_start;
1607 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1608 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001609 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001610 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001611
1612 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001613 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001614
1615 rc = cifs_posix_lock_set(file, flock);
1616 if (!rc || rc < 0)
1617 return rc;
1618
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001619 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001620 posix_lock_type = CIFS_RDLCK;
1621 else
1622 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001623
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001624 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001625 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001626
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001627 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001628 hash_lockowner(flock->fl_owner),
1629 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001630 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001631 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001632 }
1633
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001634 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001635 struct cifsLockInfo *lock;
1636
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001637 lock = cifs_lock_init(flock->fl_start, length, type,
1638 flock->fl_flags);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001639 if (!lock)
1640 return -ENOMEM;
1641
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001642 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001643 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001644 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001645 return rc;
1646 }
1647 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001648 goto out;
1649
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001650 /*
 1651 * A Windows 7 server can delay breaking a lease from read to None
 1652 * if we set a byte-range lock on a file - break it explicitly
 1653 * before sending the lock to the server to be sure the next
 1654 * read won't conflict with non-overlapping locks due to
 1655 * page reading.
1656 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001657 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1658 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001659 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001660 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1661 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001662 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001663 }
1664
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001665 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1666 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001667 if (rc) {
1668 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001669 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001670 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001671
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001672 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001673 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001674 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001675
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001676out:
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001677 if (flock->fl_flags & FL_POSIX) {
1678 /*
1679 * If this is a request to remove all locks because we
1680 * are closing the file, it doesn't matter if the
1681 * unlocking failed as both cifs.ko and the SMB server
1682 * remove the lock on file close
1683 */
1684 if (rc) {
1685 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1686 if (!(flock->fl_flags & FL_CLOSE))
1687 return rc;
1688 }
Benjamin Coddington4f656362015-10-22 13:38:14 -04001689 rc = locks_lock_file_wait(file, flock);
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001690 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001691 return rc;
1692}
1693
1694int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1695{
1696 int rc, xid;
1697 int lock = 0, unlock = 0;
1698 bool wait_flag = false;
1699 bool posix_lck = false;
1700 struct cifs_sb_info *cifs_sb;
1701 struct cifs_tcon *tcon;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001702 struct cifsFileInfo *cfile;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001703 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001704
1705 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001706 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001707
Joe Perchesf96637b2013-05-04 22:12:25 -05001708 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1709 cmd, flock->fl_flags, flock->fl_type,
1710 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001711
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001712 cfile = (struct cifsFileInfo *)file->private_data;
1713 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001714
1715 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1716 tcon->ses->server);
Al Viro7119e222014-10-22 00:25:12 -04001717 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001718
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001719 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001720 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1721 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1722 posix_lck = true;
1723 /*
1724 * BB add code here to normalize offset and length to account for
 1725 * negative length, which we cannot accept over the wire.
1726 */
1727 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001728 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001729 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001730 return rc;
1731 }
1732
1733 if (!lock && !unlock) {
1734 /*
 1735 * if this is neither a lock nor an unlock request, there is
 1736 * nothing to do since we do not know what it is
1737 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001738 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001739 return -EOPNOTSUPP;
1740 }
1741
1742 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1743 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001744 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 return rc;
1746}
1747
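/*
 * Editor's sketch (user-space; the path and helper name are made up):
 * an ordinary fcntl() byte-range lock like the one below enters the
 * kernel through cifs_lock() above.
 */
#if 0	/* user-space example, sketch only */
#include <fcntl.h>
#include <unistd.h>

static int lock_first_4k(const char *path)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 4096,
	};
	int fd, rc;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;
	rc = fcntl(fd, F_SETLKW, &fl);	/* blocks until granted */
	/* ... work on the locked range ... */
	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);
	close(fd);
	return rc;
}
#endif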
Jeff Layton597b0272012-03-23 14:40:56 -04001748/*
1749 * update the file size (if needed) after a write. Should be called with
1750 * the inode->i_lock held
1751 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001752void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001753cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1754 unsigned int bytes_written)
1755{
1756 loff_t end_of_write = offset + bytes_written;
1757
1758 if (end_of_write > cifsi->server_eof)
1759 cifsi->server_eof = end_of_write;
1760}
1761
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001762static ssize_t
1763cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1764 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765{
1766 int rc = 0;
1767 unsigned int bytes_written = 0;
1768 unsigned int total_written;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001769 struct cifs_tcon *tcon;
1770 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001771 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001772 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001773 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001774 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775
Al Viro35c265e2014-08-19 20:25:34 -04001776 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1777 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001779 tcon = tlink_tcon(open_file->tlink);
1780 server = tcon->ses->server;
1781
1782 if (!server->ops->sync_write)
1783 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001784
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001785 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 for (total_written = 0; write_size > total_written;
1788 total_written += bytes_written) {
1789 rc = -EAGAIN;
1790 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001791 struct kvec iov[2];
1792 unsigned int len;
1793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 /* we could deadlock if we called
 1796 filemap_fdatawait from here, so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001797 cifs_reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001799 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 if (rc != 0)
1801 break;
1802 }
Steve French3e844692005-10-03 13:37:24 -07001803
David Howells2b0143b2015-03-17 22:25:59 +00001804 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001805 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001806 /* iov[0] is reserved for smb header */
1807 iov[1].iov_base = (char *)write_data + total_written;
1808 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001809 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001810 io_parms.tcon = tcon;
1811 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001812 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001813 rc = server->ops->sync_write(xid, &open_file->fid,
1814 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 }
1816 if (rc || (bytes_written == 0)) {
1817 if (total_written)
1818 break;
1819 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001820 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 return rc;
1822 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001823 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001824 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001825 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001826 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001827 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001828 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 }
1830
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001831 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832
Jeff Layton7da4b492010-10-15 15:34:00 -04001833 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001834 spin_lock(&d_inode(dentry)->i_lock);
1835 if (*offset > d_inode(dentry)->i_size)
1836 i_size_write(d_inode(dentry), *offset);
1837 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 }
David Howells2b0143b2015-03-17 22:25:59 +00001839 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001840 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 return total_written;
1842}
1843
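/*
 * Editor's note (illustrative): the nested loops above are the classic
 * partial-write retry shape - the outer loop accumulates
 * total_written, while the inner loop reopens an invalidated handle
 * and retries on -EAGAIN after a reconnect. Compressed skeleton:
 */
#if 0	/* sketch only */
for (total = 0; total < size; total += written) {
	do {
		if (handle_is_invalid)
			reopen_handle();
		rc = sync_write(next_chunk, &written);
	} while (rc == -EAGAIN);
	if (rc || written == 0)
		break;
}
#endif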
Jeff Layton6508d902010-09-29 19:51:11 -04001844struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1845 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001846{
1847 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001848 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1849
1850 /* only filter by fsuid on multiuser mounts */
1851 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1852 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001853
Dave Wysochanskicb248812019-10-03 15:16:27 +10001854 spin_lock(&cifs_inode->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001855 /* we could simply take the first list entry since write-only entries
 1856 are always at the end of the list, but the first entry might
 1857 have a close pending, so we go through the whole list */
1858 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001859 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001860 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001861 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001862 if (!open_file->invalidHandle) {
1863 /* found a good file */
1864 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001865 cifsFileInfo_get(open_file);
Dave Wysochanskicb248812019-10-03 15:16:27 +10001866 spin_unlock(&cifs_inode->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001867 return open_file;
1868 } /* else might as well continue, and look for
1869 another, or simply have the caller reopen it
1870 again rather than trying to fix this handle */
1871 } else /* write only file */
1872 break; /* write only files are last so must be done */
1873 }
Dave Wysochanskicb248812019-10-03 15:16:27 +10001874 spin_unlock(&cifs_inode->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001875 return NULL;
1876}
Steve French630f3f0c2007-10-25 21:17:17 +00001877
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001878/* Return -EBADF if no handle is found and general rc otherwise */
1879int
1880cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
1881 struct cifsFileInfo **ret_file)
Steve French6148a742005-10-05 12:23:19 -07001882{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001883 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001884 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001885 bool any_available = false;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001886 int rc = -EBADF;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001887 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001888
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001889 *ret_file = NULL;
1890
1891 /*
1892 * Having a null inode here (because mapping->host was set to zero by
 1893 * the VFS or MM) should not happen but we had reports of an oops (due
1894 * to it being zero) during stress testcases so we need to check for it
1895 */
Steve French60808232006-04-22 15:53:05 +00001896
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001897 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001898 cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
Steve French60808232006-04-22 15:53:05 +00001899 dump_stack();
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001900 return rc;
Steve French60808232006-04-22 15:53:05 +00001901 }
1902
Jeff Laytond3892292010-11-02 16:22:50 -04001903 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1904
Jeff Layton6508d902010-09-29 19:51:11 -04001905 /* only filter by fsuid on multiuser mounts */
1906 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1907 fsuid_only = false;
1908
Dave Wysochanskicb248812019-10-03 15:16:27 +10001909 spin_lock(&cifs_inode->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001910refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001911 if (refind > MAX_REOPEN_ATT) {
Dave Wysochanskicb248812019-10-03 15:16:27 +10001912 spin_unlock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001913 return rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001914 }
Steve French6148a742005-10-05 12:23:19 -07001915 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001916 if (!any_available && open_file->pid != current->tgid)
1917 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001918 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001919 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001920 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001921 if (!open_file->invalidHandle) {
1922 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001923 cifsFileInfo_get(open_file);
Dave Wysochanskicb248812019-10-03 15:16:27 +10001924 spin_unlock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001925 *ret_file = open_file;
1926 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001927 } else {
1928 if (!inv_file)
1929 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001930 }
Steve French6148a742005-10-05 12:23:19 -07001931 }
1932 }
Jeff Layton2846d382008-09-22 21:33:33 -04001933 /* couldn't find a usable FH with the same pid, try any available */
1934 if (!any_available) {
1935 any_available = true;
1936 goto refind_writable;
1937 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001938
1939 if (inv_file) {
1940 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001941 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001942 }
1943
Dave Wysochanskicb248812019-10-03 15:16:27 +10001944 spin_unlock(&cifs_inode->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001945
1946 if (inv_file) {
1947 rc = cifs_reopen_file(inv_file, false);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001948 if (!rc) {
1949 *ret_file = inv_file;
1950 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001951 }
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001952
Ronnie Sahlberg487317c2019-06-05 10:38:38 +10001953 spin_lock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001954 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
Ronnie Sahlberg487317c2019-06-05 10:38:38 +10001955 spin_unlock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001956 cifsFileInfo_put(inv_file);
1957 ++refind;
1958 inv_file = NULL;
Dave Wysochanskicb248812019-10-03 15:16:27 +10001959 spin_lock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001960 goto refind_writable;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001961 }
1962
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08001963 return rc;
1964}
1965
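/*
 * Editor's note (a rough sketch): cifs_reopen_file() can block, so the
 * loop above must drop open_file_lock before reopening an invalidated
 * handle and retake it before rescanning; 'refind' caps the rescans at
 * MAX_REOPEN_ATT. The drop-reopen-rescan dance, compressed:
 */
#if 0	/* sketch only */
spin_lock(&cifs_inode->open_file_lock);
while (refind++ < MAX_REOPEN_ATT) {
	/* ... scan openFileList for a valid writable handle ... */
	spin_unlock(&cifs_inode->open_file_lock);
	if (!cifs_reopen_file(inv_file, false))
		return 0;		/* reopened successfully */
	spin_lock(&cifs_inode->open_file_lock);
}
spin_unlock(&cifs_inode->open_file_lock);
#endif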
1966struct cifsFileInfo *
1967find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
1968{
1969 struct cifsFileInfo *cfile;
1970 int rc;
1971
1972 rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
1973 if (rc)
 1974 cifs_dbg(FYI, "couldn't find writable handle rc=%d\n", rc);
1975
1976 return cfile;
Steve French6148a742005-10-05 12:23:19 -07001977}
1978
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10001979int
1980cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
1981 struct cifsFileInfo **ret_file)
1982{
1983 struct list_head *tmp;
1984 struct cifsFileInfo *cfile;
1985 struct cifsInodeInfo *cinode;
1986 char *full_path;
1987
1988 *ret_file = NULL;
1989
1990 spin_lock(&tcon->open_file_lock);
1991 list_for_each(tmp, &tcon->openFileList) {
1992 cfile = list_entry(tmp, struct cifsFileInfo,
1993 tlist);
1994 full_path = build_path_from_dentry(cfile->dentry);
1995 if (full_path == NULL) {
1996 spin_unlock(&tcon->open_file_lock);
1997 return -ENOMEM;
1998 }
1999 if (strcmp(full_path, name)) {
2000 kfree(full_path);
2001 continue;
2002 }
2003
2004 kfree(full_path);
2005 cinode = CIFS_I(d_inode(cfile->dentry));
2006 spin_unlock(&tcon->open_file_lock);
2007 return cifs_get_writable_file(cinode, 0, ret_file);
2008 }
2009
2010 spin_unlock(&tcon->open_file_lock);
2011 return -ENOENT;
2012}
2013
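/*
 * Editor's note (illustrative): this function and
 * cifs_get_readable_path() below differ only in the final handle
 * lookup; the path-matching walk could be factored into a shared
 * helper along these lines (hypothetical name, open_file_lock
 * handling left to the caller):
 */
#if 0	/* sketch only */
static struct cifsInodeInfo *
cifs_find_cinode_by_path(struct cifs_tcon *tcon, const char *name)
{
	struct cifsFileInfo *cfile;

	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		char *full_path = build_path_from_dentry(cfile->dentry);
		bool match = full_path && !strcmp(full_path, name);

		kfree(full_path);
		if (match)
			return CIFS_I(d_inode(cfile->dentry));
	}
	return NULL;
}
#endif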
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002014int
2015cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2016 struct cifsFileInfo **ret_file)
2017{
2018 struct list_head *tmp;
2019 struct cifsFileInfo *cfile;
2020 struct cifsInodeInfo *cinode;
2021 char *full_path;
2022
2023 *ret_file = NULL;
2024
2025 spin_lock(&tcon->open_file_lock);
2026 list_for_each(tmp, &tcon->openFileList) {
2027 cfile = list_entry(tmp, struct cifsFileInfo,
2028 tlist);
2029 full_path = build_path_from_dentry(cfile->dentry);
2030 if (full_path == NULL) {
2031 spin_unlock(&tcon->open_file_lock);
2032 return -ENOMEM;
2033 }
2034 if (strcmp(full_path, name)) {
2035 kfree(full_path);
2036 continue;
2037 }
2038
2039 kfree(full_path);
2040 cinode = CIFS_I(d_inode(cfile->dentry));
2041 spin_unlock(&tcon->open_file_lock);
2042 *ret_file = find_readable_file(cinode, 0);
2043 return *ret_file ? 0 : -ENOENT;
2044 }
2045
2046 spin_unlock(&tcon->open_file_lock);
2047 return -ENOENT;
2048}
2049
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
2051{
2052 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002053 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 char *write_data;
2055 int rc = -EFAULT;
2056 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07002058 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
2060 if (!mapping || !mapping->host)
2061 return -EFAULT;
2062
2063 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
2065 offset += (loff_t)from;
2066 write_data = kmap(page);
2067 write_data += from;
2068
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002069 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 kunmap(page);
2071 return -EIO;
2072 }
2073
2074 /* racing with truncate? */
2075 if (offset > mapping->host->i_size) {
2076 kunmap(page);
2077 return 0; /* don't care */
2078 }
2079
2080 /* check to make sure that we are not extending the file */
2081 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002082 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002084 rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
2085 if (!rc) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002086 bytes_written = cifs_write(open_file, open_file->pid,
2087 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04002088 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07002090 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002091 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07002092 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002093 else if (bytes_written < 0)
2094 rc = bytes_written;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002095 else
2096 rc = -EFAULT;
Steve French6148a742005-10-05 12:23:19 -07002097 } else {
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002098 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2099 if (!is_retryable_error(rc))
2100 rc = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 }
2102
2103 kunmap(page);
2104 return rc;
2105}
2106
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002107static struct cifs_writedata *
2108wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2109 pgoff_t end, pgoff_t *index,
2110 unsigned int *found_pages)
2111{
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002112 struct cifs_writedata *wdata;
2113
2114 wdata = cifs_writedata_alloc((unsigned int)tofind,
2115 cifs_writev_complete);
2116 if (!wdata)
2117 return NULL;
2118
Jan Kara9c19a9c2017-11-15 17:35:26 -08002119 *found_pages = find_get_pages_range_tag(mapping, index, end,
2120 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002121 return wdata;
2122}
2123
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002124static unsigned int
2125wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2126 struct address_space *mapping,
2127 struct writeback_control *wbc,
2128 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2129{
2130 unsigned int nr_pages = 0, i;
2131 struct page *page;
2132
2133 for (i = 0; i < found_pages; i++) {
2134 page = wdata->pages[i];
2135 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002136 * At this point we hold neither the i_pages lock nor the
2137 * page lock: the page may be truncated or invalidated
2138 * (changing page->mapping to NULL), or even swizzled
2139 * back from swapper_space to tmpfs file mapping
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002140 */
2141
2142 if (nr_pages == 0)
2143 lock_page(page);
2144 else if (!trylock_page(page))
2145 break;
2146
2147 if (unlikely(page->mapping != mapping)) {
2148 unlock_page(page);
2149 break;
2150 }
2151
2152 if (!wbc->range_cyclic && page->index > end) {
2153 *done = true;
2154 unlock_page(page);
2155 break;
2156 }
2157
2158 if (*next && (page->index != *next)) {
2159 /* Not next consecutive page */
2160 unlock_page(page);
2161 break;
2162 }
2163
2164 if (wbc->sync_mode != WB_SYNC_NONE)
2165 wait_on_page_writeback(page);
2166
2167 if (PageWriteback(page) ||
2168 !clear_page_dirty_for_io(page)) {
2169 unlock_page(page);
2170 break;
2171 }
2172
2173 /*
2174 * This actually clears the dirty bit in the radix tree.
2175 * See cifs_writepage() for more commentary.
2176 */
2177 set_page_writeback(page);
2178 if (page_offset(page) >= i_size_read(mapping->host)) {
2179 *done = true;
2180 unlock_page(page);
2181 end_page_writeback(page);
2182 break;
2183 }
2184
2185 wdata->pages[i] = page;
2186 *next = page->index + 1;
2187 ++nr_pages;
2188 }
2189
2190 /* reset index to refind any pages skipped */
2191 if (nr_pages == 0)
2192 *index = wdata->pages[0]->index + 1;
2193
2194 /* put any pages we aren't going to use */
2195 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002196 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002197 wdata->pages[i] = NULL;
2198 }
2199
2200 return nr_pages;
2201}
2202
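/*
 * Editor's note (illustrative): the loop above blocks in lock_page()
 * only for the first page of a batch and uses trylock_page() for the
 * rest, so one contended page cannot stall an otherwise full batch;
 * the '*next' check additionally keeps a batch file-contiguous.
 * Skeleton of the locking decision:
 */
#if 0	/* sketch only */
if (nr_pages == 0)
	lock_page(page);		/* may sleep for the batch head */
else if (!trylock_page(page))
	break;				/* don't stall mid-batch */
#endif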
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002203static int
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002204wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2205 struct address_space *mapping, struct writeback_control *wbc)
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002206{
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002207 int rc;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002208 struct TCP_Server_Info *server =
2209 tlink_tcon(wdata->cfile->tlink)->ses->server;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002210
2211 wdata->sync_mode = wbc->sync_mode;
2212 wdata->nr_pages = nr_pages;
2213 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002214 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002215 wdata->tailsz = min(i_size_read(mapping->host) -
2216 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002217 (loff_t)PAGE_SIZE);
2218 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002219 wdata->pid = wdata->cfile->pid;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002220
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002221 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2222 if (rc)
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002223 return rc;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002224
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002225 if (wdata->cfile->invalidHandle)
2226 rc = -EAGAIN;
2227 else
2228 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002229
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002230 return rc;
2231}
2232
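/*
 * Editor's worked example (illustrative numbers): with PAGE_SIZE 4096,
 * i_size 10000 and a 3-page batch starting at offset 0, the tail page
 * is clamped to tailsz = min(10000 - 8192, 4096) = 1808, giving
 * wdata->bytes = 2 * 4096 + 1808 = 10000 - the write covers the file
 * exactly up to EOF and no further.
 */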
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002234 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235{
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002236 struct inode *inode = mapping->host;
2237 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002238 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002239 bool done = false, scanned = false, range_whole = false;
2240 pgoff_t end, index;
2241 struct cifs_writedata *wdata;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002242 struct cifsFileInfo *cfile = NULL;
Steve French37c0eb42005-10-05 14:50:29 -07002243 int rc = 0;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002244 int saved_rc = 0;
Steve French0cb012d2018-10-11 01:01:02 -05002245 unsigned int xid;
Steve French50c2f752007-07-13 00:33:32 +00002246
Steve French37c0eb42005-10-05 14:50:29 -07002247 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002248 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002249 * one page at a time via cifs_writepage
2250 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002251 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002252 return generic_writepages(mapping, wbc);
2253
Steve French0cb012d2018-10-11 01:01:02 -05002254 xid = get_xid();
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002255 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002256 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002257 end = -1;
2258 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002259 index = wbc->range_start >> PAGE_SHIFT;
2260 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002261 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002262 range_whole = true;
2263 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002264 }
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002265 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
Steve French37c0eb42005-10-05 14:50:29 -07002266retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002267 while (!done && index <= end) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002268 unsigned int i, nr_pages, found_pages, wsize;
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002269 pgoff_t next = 0, tofind, saved_index = index;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002270 struct cifs_credits credits_on_stack;
2271 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002272 int get_file_rc = 0;
Steve French37c0eb42005-10-05 14:50:29 -07002273
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002274 if (cfile)
2275 cifsFileInfo_put(cfile);
2276
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002277 rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
2278
2279 /* in case of an error, store it so we can return it later */
2280 if (rc)
2281 get_file_rc = rc;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002282
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002283 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002284 &wsize, credits);
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002285 if (rc != 0) {
2286 done = true;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002287 break;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002288 }
Steve French37c0eb42005-10-05 14:50:29 -07002289
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002290 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07002291
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002292 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2293 &found_pages);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002294 if (!wdata) {
2295 rc = -ENOMEM;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002296 done = true;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002297 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002298 break;
2299 }
2300
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002301 if (found_pages == 0) {
2302 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002303 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002304 break;
2305 }
2306
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002307 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2308 end, &index, &next, &done);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002309
2310 /* nothing to write? */
2311 if (nr_pages == 0) {
2312 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002313 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002314 continue;
2315 }
2316
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002317 wdata->credits = credits_on_stack;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002318 wdata->cfile = cfile;
2319 cfile = NULL;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002320
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002321 if (!wdata->cfile) {
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002322 cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
2323 get_file_rc);
2324 if (is_retryable_error(get_file_rc))
2325 rc = get_file_rc;
2326 else
2327 rc = -EBADF;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002328 } else
2329 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
Jeff Layton941b8532011-01-11 07:24:01 -05002330
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002331 for (i = 0; i < nr_pages; ++i)
2332 unlock_page(wdata->pages[i]);
2333
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002334 /* send failure -- clean up the mess */
2335 if (rc != 0) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002336 add_credits_and_wake_if(server, &wdata->credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002337 for (i = 0; i < nr_pages; ++i) {
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002338 if (is_retryable_error(rc))
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002339 redirty_page_for_writepage(wbc,
2340 wdata->pages[i]);
2341 else
2342 SetPageError(wdata->pages[i]);
2343 end_page_writeback(wdata->pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002344 put_page(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002345 }
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002346 if (!is_retryable_error(rc))
Jeff Layton941b8532011-01-11 07:24:01 -05002347 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002348 }
2349 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002350
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002351 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2352 index = saved_index;
2353 continue;
2354 }
2355
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002356 /* Return immediately if we received a signal during writing */
2357 if (is_interrupt_error(rc)) {
2358 done = true;
2359 break;
2360 }
2361
2362 if (rc != 0 && saved_rc == 0)
2363 saved_rc = rc;
2364
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002365 wbc->nr_to_write -= nr_pages;
2366 if (wbc->nr_to_write <= 0)
2367 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002368
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002369 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002370 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002371
Steve French37c0eb42005-10-05 14:50:29 -07002372 if (!scanned && !done) {
2373 /*
2374 * We hit the last page and there is more work to be done: wrap
2375 * back to the start of the file
2376 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002377 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002378 index = 0;
2379 goto retry;
2380 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002381
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002382 if (saved_rc != 0)
2383 rc = saved_rc;
2384
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002385 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002386 mapping->writeback_index = index;
2387
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002388 if (cfile)
2389 cifsFileInfo_put(cfile);
Steve French0cb012d2018-10-11 01:01:02 -05002390 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 return rc;
2392}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393
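/*
 * Write one locked page to the server via cifs_partialpagewrite. For
 * WB_SYNC_ALL writeback, -EAGAIN is retried indefinitely; other
 * retryable errors redirty the page, anything else marks it PageError.
 */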
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002394static int
2395cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002397 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002398 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002400 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401/* BB add check for wbc flags */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002402 get_page(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002403 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002404 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002405
2406 /*
2407 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2408 *
2409 * A writepage() implementation always needs to do either this,
2410 * or re-dirty the page with "redirty_page_for_writepage()" in
2411 * the case of a failure.
2412 *
2413 * Just unlocking the page would leave the radix tree tag bits
2414 * out of sync with the actual state of the page.
2415 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002416 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002417retry_write:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002418 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002419 if (is_retryable_error(rc)) {
2420 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
Jeff Layton97b37f22017-05-25 06:59:52 -04002421 goto retry_write;
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002422 redirty_page_for_writepage(wbc, page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002423 } else if (rc != 0) {
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002424 SetPageError(page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002425 mapping_set_error(page->mapping, rc);
2426 } else {
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002427 SetPageUptodate(page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002428 }
Linus Torvaldscb876f42006-12-23 16:19:07 -08002429 end_page_writeback(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002430 put_page(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002431 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 return rc;
2433}
2434
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002435static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2436{
2437 int rc = cifs_writepage_locked(page, wbc);
2438 unlock_page(page);
2439 return rc;
2440}
2441
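/*
 * Called once the generic write path has copied data into a page cache
 * page. A page that is still not uptodate (e.g. a short copy into a
 * page we never read in) is written synchronously to the server with
 * cifs_write(); otherwise the page is simply dirtied. Either way the
 * inode size is extended if the write went past EOF.
 */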
Nick Piggind9414772008-09-24 11:32:59 -04002442static int cifs_write_end(struct file *file, struct address_space *mapping,
2443 loff_t pos, unsigned len, unsigned copied,
2444 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445{
Nick Piggind9414772008-09-24 11:32:59 -04002446 int rc;
2447 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002448 struct cifsFileInfo *cfile = file->private_data;
2449 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2450 __u32 pid;
2451
2452 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2453 pid = cfile->pid;
2454 else
2455 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456
Joe Perchesf96637b2013-05-04 22:12:25 -05002457 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002458 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002459
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002460 if (PageChecked(page)) {
2461 if (copied == len)
2462 SetPageUptodate(page);
2463 ClearPageChecked(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002464 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002465 SetPageUptodate(page);
2466
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002468 char *page_data;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002469 unsigned offset = pos & (PAGE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002470 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002471
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002472 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 /* this is probably better than calling partialpage_write directly,
2474 since in this function the file handle is already known and we
2475 might as well leverage it */
2476 /* BB check if anything else missing out of ppw
2477 such as updating last write time */
2478 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002479 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002480 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002482
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002483 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002484 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002485 rc = copied;
2486 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002487 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 }
2489
Nick Piggind9414772008-09-24 11:32:59 -04002490 if (rc > 0) {
2491 spin_lock(&inode->i_lock);
2492 if (pos > inode->i_size)
2493 i_size_write(inode, pos);
2494 spin_unlock(&inode->i_lock);
2495 }
2496
2497 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002498 put_page(page);
Nick Piggind9414772008-09-24 11:32:59 -04002499
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 return rc;
2501}
2502
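/*
 * fsync for strict cache mode: write back and wait on dirty pages, zap
 * the mapping when we hold no read lease/oplock (so stale cached data
 * cannot be served later), then ask the server to flush its copy of
 * the handle unless CIFS_MOUNT_NOSSYNC is set.
 */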
Josef Bacik02c24a82011-07-16 20:44:56 -04002503int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2504 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002506 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002508 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002509 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002510 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002511 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002512 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513
Jeff Layton3b49c9a2017-07-07 15:20:52 -04002514 rc = file_write_and_wait_range(file, start, end);
Josef Bacik02c24a82011-07-16 20:44:56 -04002515 if (rc)
2516 return rc;
Josef Bacik02c24a82011-07-16 20:44:56 -04002517
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002518 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519
Al Viro35c265e2014-08-19 20:25:34 -04002520 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2521 file, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002522
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002523 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002524 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002525 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002526 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002527 rc = 0; /* don't care about it in fsync */
2528 }
2529 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002530
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002531 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002532 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2533 server = tcon->ses->server;
2534 if (server->ops->flush)
2535 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2536 else
2537 rc = -ENOSYS;
2538 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002539
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002540 free_xid(xid);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002541 return rc;
2542}
2543
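/*
 * Plain fsync: write back and wait on our dirty pages, then request a
 * server-side flush of the handle. Unlike cifs_strict_fsync() the page
 * cache is left intact.
 */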
Josef Bacik02c24a82011-07-16 20:44:56 -04002544int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002545{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002546 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002547 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002548 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002549 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002550 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002551 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002552
Jeff Layton3b49c9a2017-07-07 15:20:52 -04002553 rc = file_write_and_wait_range(file, start, end);
Josef Bacik02c24a82011-07-16 20:44:56 -04002554 if (rc)
2555 return rc;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002556
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002557 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002558
Al Viro35c265e2014-08-19 20:25:34 -04002559 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2560 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002561
2562 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002563 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2564 server = tcon->ses->server;
2565 if (server->ops->flush)
2566 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2567 else
2568 rc = -ENOSYS;
2569 }
Steve Frenchb298f222009-02-21 21:17:43 +00002570
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002571 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572 return rc;
2573}
2574
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575/*
2576 * As file closes, flush all cached write data for this inode checking
2577 * for write behind errors.
2578 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002579int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580{
Al Viro496ad9a2013-01-23 17:07:38 -05002581 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 int rc = 0;
2583
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002584 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002585 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002586
Joe Perchesf96637b2013-05-04 22:12:25 -05002587 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588
2589 return rc;
2590}
2591
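/*
 * Allocate num_pages pages for an uncached write, releasing every page
 * already obtained if any allocation fails so the caller sees either a
 * fully populated array or -ENOMEM.
 */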
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002592static int
2593cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2594{
2595 int rc = 0;
2596 unsigned long i;
2597
2598 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002599 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002600 if (!pages[i]) {
2601 /*
2602 * save number of pages we have already allocated and
2603 * return with ENOMEM error
2604 */
2605 num_pages = i;
2606 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002607 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002608 }
2609 }
2610
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002611 if (rc) {
2612 for (i = 0; i < num_pages; i++)
2613 put_page(pages[i]);
2614 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002615 return rc;
2616}
2617
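/*
 * Number of pages needed for the next chunk of an uncached write: the
 * chunk is capped at wsize and rounded up to whole pages. For example,
 * assuming 4K pages, wsize=65536 and len=70000 yield *cur_len=65536
 * and a return value of 16.
 */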
2618static inline
2619size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2620{
2621 size_t num_pages;
2622 size_t clen;
2623
2624 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002625 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002626
2627 if (cur_len)
2628 *cur_len = clen;
2629
2630 return num_pages;
2631}
2632
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002633static void
Steve French4a5c80d2014-02-07 20:45:12 -06002634cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002635{
2636 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002637 struct cifs_writedata *wdata = container_of(refcount,
2638 struct cifs_writedata, refcount);
2639
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002640 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
Steve French4a5c80d2014-02-07 20:45:12 -06002641 for (i = 0; i < wdata->nr_pages; i++)
2642 put_page(wdata->pages[i]);
2643 cifs_writedata_release(refcount);
2644}
2645
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002646static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2647
Steve French4a5c80d2014-02-07 20:45:12 -06002648static void
2649cifs_uncached_writev_complete(struct work_struct *work)
2650{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002651 struct cifs_writedata *wdata = container_of(work,
2652 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002653 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002654 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2655
2656 spin_lock(&inode->i_lock);
2657 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2658 if (cifsi->server_eof > inode->i_size)
2659 i_size_write(inode, cifsi->server_eof);
2660 spin_unlock(&inode->i_lock);
2661
2662 complete(&wdata->done);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002663 collect_uncached_write_data(wdata->ctx);
2664 /* the call below may free the last reference to the aio ctx */
Steve French4a5c80d2014-02-07 20:45:12 -06002665 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002666}
2667
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002668static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002669wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2670 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002671{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002672 size_t save_len, copied, bytes, cur_len = *len;
2673 unsigned long i, nr_pages = *num_pages;
2674
2675 save_len = cur_len;
2676 for (i = 0; i < nr_pages; i++) {
2677 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2678 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2679 cur_len -= copied;
2680 /*
2681 * If we didn't copy as much as we expected, then that
2682 * may mean we trod into an unmapped area. Stop copying
2683 * at that point. On the next pass through the big
2684 * loop, we'll likely end up getting a zero-length
2685 * write and bailing out of it.
2686 */
2687 if (copied < bytes)
2688 break;
2689 }
2690 cur_len = save_len - cur_len;
2691 *len = cur_len;
2692
2693 /*
2694 * If we have no data to send, then that probably means that
2695 * the copy above failed altogether. That's most likely because
2696 * the address in the iovec was bogus. Return -EFAULT and let
2697 * the caller free anything we allocated and bail out.
2698 */
2699 if (!cur_len)
2700 return -EFAULT;
2701
2702 /*
2703 * i + 1 now represents the number of pages we actually used in
2704 * the copy phase above.
2705 */
2706 *num_pages = i + 1;
2707 return 0;
2708}
2709
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002710static int
Long Li8c5f9c12018-10-31 22:13:10 +00002711cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
2712 struct cifs_aio_ctx *ctx)
2713{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002714 unsigned int wsize;
2715 struct cifs_credits credits;
Long Li8c5f9c12018-10-31 22:13:10 +00002716 int rc;
2717 struct TCP_Server_Info *server =
2718 tlink_tcon(wdata->cfile->tlink)->ses->server;
2719
Long Li8c5f9c12018-10-31 22:13:10 +00002720 do {
Long Lid53e2922019-03-15 07:54:59 +00002721 if (wdata->cfile->invalidHandle) {
Long Li8c5f9c12018-10-31 22:13:10 +00002722 rc = cifs_reopen_file(wdata->cfile, false);
Long Lid53e2922019-03-15 07:54:59 +00002723 if (rc == -EAGAIN)
2724 continue;
2725 else if (rc)
2726 break;
2727 }
2728
2729
2730 /*
2731 * Wait for credits to resend this wdata.
2732 * Note: we are attempting to resend the whole wdata, not in
2733 * segments.
2734 */
2735 do {
2736 rc = server->ops->wait_mtu_credits(server, wdata->bytes,
2737 &wsize, &credits);
2738 if (rc)
2739 goto fail;
2740
2741 if (wsize < wdata->bytes) {
2742 add_credits_and_wake_if(server, &credits, 0);
2743 msleep(1000);
2744 }
2745 } while (wsize < wdata->bytes);
2746 wdata->credits = credits;
2747
2748 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2749
2750 if (!rc) {
2751 if (wdata->cfile->invalidHandle)
2752 rc = -EAGAIN;
2753 else
2754 rc = server->ops->async_writev(wdata,
Long Li8c5f9c12018-10-31 22:13:10 +00002755 cifs_uncached_writedata_release);
Long Lid53e2922019-03-15 07:54:59 +00002756 }
Long Li8c5f9c12018-10-31 22:13:10 +00002757
Long Lid53e2922019-03-15 07:54:59 +00002758 /* If the write was successfully sent, we are done */
2759 if (!rc) {
2760 list_add_tail(&wdata->list, wdata_list);
2761 return 0;
2762 }
Long Li8c5f9c12018-10-31 22:13:10 +00002763
Long Lid53e2922019-03-15 07:54:59 +00002764 /* Roll back credits and retry if needed */
2765 add_credits_and_wake_if(server, &wdata->credits, 0);
2766 } while (rc == -EAGAIN);
2767
2768fail:
Long Li8c5f9c12018-10-31 22:13:10 +00002769 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Long Li8c5f9c12018-10-31 22:13:10 +00002770 return rc;
2771}
2772
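/*
 * Carve an uncached or direct write into wsize-sized async requests
 * (e.g., assuming wsize=64K, a 1M write is issued as 16 chunks). The
 * direct I/O path pins the user's pages with iov_iter_get_pages_alloc();
 * the uncached path copies the data into freshly allocated pages. Each
 * successfully sent wdata is appended to wdata_list for the collection
 * routine to reap.
 */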
2773static int
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002774cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2775 struct cifsFileInfo *open_file,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002776 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
2777 struct cifs_aio_ctx *ctx)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002778{
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002779 int rc = 0;
2780 size_t cur_len;
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002781 unsigned long nr_pages, num_pages, i;
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002782 struct cifs_writedata *wdata;
Al Virofc56b982016-09-21 18:18:23 -04002783 struct iov_iter saved_from = *from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002784 loff_t saved_offset = offset;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002785 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002786 struct TCP_Server_Info *server;
Long Li8c5f9c12018-10-31 22:13:10 +00002787 struct page **pagevec;
2788 size_t start;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002789 unsigned int xid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002790
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002791 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2792 pid = open_file->pid;
2793 else
2794 pid = current->tgid;
2795
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002796 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002797 xid = get_xid();
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002798
2799 do {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002800 unsigned int wsize;
2801 struct cifs_credits credits_on_stack;
2802 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002803
Pavel Shilovsky3e952992019-01-25 11:59:01 -08002804 if (open_file->invalidHandle) {
2805 rc = cifs_reopen_file(open_file, false);
2806 if (rc == -EAGAIN)
2807 continue;
2808 else if (rc)
2809 break;
2810 }
2811
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002812 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002813 &wsize, credits);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002814 if (rc)
2815 break;
2816
Long Lib6bc8a72018-12-16 23:17:04 +00002817 cur_len = min_t(const size_t, len, wsize);
2818
Long Li8c5f9c12018-10-31 22:13:10 +00002819 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05002820 ssize_t result;
2821
2822 result = iov_iter_get_pages_alloc(
Long Lib6bc8a72018-12-16 23:17:04 +00002823 from, &pagevec, cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05002824 if (result < 0) {
Long Li8c5f9c12018-10-31 22:13:10 +00002825 cifs_dbg(VFS,
2826 "direct_writev couldn't get user pages "
2827 "(rc=%zd) iter type %d iov_offset %zd "
2828 "count %zd\n",
Steve Frenchb98e26d2018-11-01 10:54:32 -05002829 result, from->type,
Long Li8c5f9c12018-10-31 22:13:10 +00002830 from->iov_offset, from->count);
2831 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00002832
2833 rc = result;
2834 add_credits_and_wake_if(server, credits, 0);
Long Li8c5f9c12018-10-31 22:13:10 +00002835 break;
2836 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05002837 cur_len = (size_t)result;
Long Li8c5f9c12018-10-31 22:13:10 +00002838 iov_iter_advance(from, cur_len);
2839
2840 nr_pages =
2841 (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
2842
2843 wdata = cifs_writedata_direct_alloc(pagevec,
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002844 cifs_uncached_writev_complete);
Long Li8c5f9c12018-10-31 22:13:10 +00002845 if (!wdata) {
2846 rc = -ENOMEM;
2847 add_credits_and_wake_if(server, credits, 0);
2848 break;
2849 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002850
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002851
Long Li8c5f9c12018-10-31 22:13:10 +00002852 wdata->page_offset = start;
2853 wdata->tailsz =
2854 nr_pages > 1 ?
2855 cur_len - (PAGE_SIZE - start) -
2856 (nr_pages - 2) * PAGE_SIZE :
2857 cur_len;
2858 } else {
2859 nr_pages = get_numpages(wsize, len, &cur_len);
2860 wdata = cifs_writedata_alloc(nr_pages,
2861 cifs_uncached_writev_complete);
2862 if (!wdata) {
2863 rc = -ENOMEM;
2864 add_credits_and_wake_if(server, credits, 0);
2865 break;
2866 }
Jeff Layton5d81de82014-02-14 07:20:35 -05002867
Long Li8c5f9c12018-10-31 22:13:10 +00002868 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2869 if (rc) {
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08002870 kvfree(wdata->pages);
Long Li8c5f9c12018-10-31 22:13:10 +00002871 kfree(wdata);
2872 add_credits_and_wake_if(server, credits, 0);
2873 break;
2874 }
2875
2876 num_pages = nr_pages;
2877 rc = wdata_fill_from_iovec(
2878 wdata, from, &cur_len, &num_pages);
2879 if (rc) {
2880 for (i = 0; i < nr_pages; i++)
2881 put_page(wdata->pages[i]);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08002882 kvfree(wdata->pages);
Long Li8c5f9c12018-10-31 22:13:10 +00002883 kfree(wdata);
2884 add_credits_and_wake_if(server, credits, 0);
2885 break;
2886 }
2887
2888 /*
2889 * Bring nr_pages down to the number of pages we
2890 * actually used, and free any pages that we didn't use.
2891 */
2892 for ( ; nr_pages > num_pages; nr_pages--)
2893 put_page(wdata->pages[nr_pages - 1]);
2894
2895 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
2896 }
Jeff Layton5d81de82014-02-14 07:20:35 -05002897
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002898 wdata->sync_mode = WB_SYNC_ALL;
2899 wdata->nr_pages = nr_pages;
2900 wdata->offset = (__u64)offset;
2901 wdata->cfile = cifsFileInfo_get(open_file);
2902 wdata->pid = pid;
2903 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002904 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002905 wdata->credits = credits_on_stack;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002906 wdata->ctx = ctx;
2907 kref_get(&ctx->refcount);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002908
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002909 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2910
2911 if (!rc) {
2912 if (wdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08002913 rc = -EAGAIN;
2914 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002915 rc = server->ops->async_writev(wdata,
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002916 cifs_uncached_writedata_release);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002917 }
2918
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002919 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002920 add_credits_and_wake_if(server, &wdata->credits, 0);
Steve French4a5c80d2014-02-07 20:45:12 -06002921 kref_put(&wdata->refcount,
2922 cifs_uncached_writedata_release);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002923 if (rc == -EAGAIN) {
Al Virofc56b982016-09-21 18:18:23 -04002924 *from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002925 iov_iter_advance(from, offset - saved_offset);
2926 continue;
2927 }
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002928 break;
2929 }
2930
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002931 list_add_tail(&wdata->list, wdata_list);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002932 offset += cur_len;
2933 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002934 } while (len > 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002935
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002936 free_xid(xid);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002937 return rc;
2938}
2939
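/*
 * Reap completed uncached writes in order of increasing offset under
 * ctx->aio_mutex, resending any chunk that came back -EAGAIN, then
 * report the total bytes written through the iocb completion (aio) or
 * ctx->done (synchronous callers).
 */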
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002940static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
2941{
2942 struct cifs_writedata *wdata, *tmp;
2943 struct cifs_tcon *tcon;
2944 struct cifs_sb_info *cifs_sb;
2945 struct dentry *dentry = ctx->cfile->dentry;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002946 int rc;
2947
2948 tcon = tlink_tcon(ctx->cfile->tlink);
2949 cifs_sb = CIFS_SB(dentry->d_sb);
2950
2951 mutex_lock(&ctx->aio_mutex);
2952
2953 if (list_empty(&ctx->list)) {
2954 mutex_unlock(&ctx->aio_mutex);
2955 return;
2956 }
2957
2958 rc = ctx->rc;
2959 /*
2960 * Wait for and collect replies for any successful sends in order of
2961 * increasing offset. Once an error is hit, then return without waiting
2962 * for any more replies.
2963 */
2964restart_loop:
2965 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
2966 if (!rc) {
2967 if (!try_wait_for_completion(&wdata->done)) {
2968 mutex_unlock(&ctx->aio_mutex);
2969 return;
2970 }
2971
2972 if (wdata->result)
2973 rc = wdata->result;
2974 else
2975 ctx->total_len += wdata->bytes;
2976
2977 /* resend call if it's a retryable error */
2978 if (rc == -EAGAIN) {
2979 struct list_head tmp_list;
2980 struct iov_iter tmp_from = ctx->iter;
2981
2982 INIT_LIST_HEAD(&tmp_list);
2983 list_del_init(&wdata->list);
2984
Long Li8c5f9c12018-10-31 22:13:10 +00002985 if (ctx->direct_io)
2986 rc = cifs_resend_wdata(
2987 wdata, &tmp_list, ctx);
2988 else {
2989 iov_iter_advance(&tmp_from,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002990 wdata->offset - ctx->pos);
2991
Long Li8c5f9c12018-10-31 22:13:10 +00002992 rc = cifs_write_from_iter(wdata->offset,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002993 wdata->bytes, &tmp_from,
2994 ctx->cfile, cifs_sb, &tmp_list,
2995 ctx);
Long Lid53e2922019-03-15 07:54:59 +00002996
2997 kref_put(&wdata->refcount,
2998 cifs_uncached_writedata_release);
Long Li8c5f9c12018-10-31 22:13:10 +00002999 }
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003000
3001 list_splice(&tmp_list, &ctx->list);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003002 goto restart_loop;
3003 }
3004 }
3005 list_del_init(&wdata->list);
3006 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3007 }
3008
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003009 cifs_stats_bytes_written(tcon, ctx->total_len);
3010 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
3011
3012 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3013
3014 mutex_unlock(&ctx->aio_mutex);
3015
3016 if (ctx->iocb && ctx->iocb->ki_complete)
3017 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3018 else
3019 complete(&ctx->done);
3020}
3021
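/*
 * Common back end for cifs_user_writev() and cifs_direct_writev(): set
 * up a cifs_aio_ctx, kick off the async writes, then either return
 * -EIOCBQUEUED to aio callers or wait for completion and return the
 * number of bytes written.
 */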
Long Li8c5f9c12018-10-31 22:13:10 +00003022static ssize_t __cifs_writev(
3023 struct kiocb *iocb, struct iov_iter *from, bool direct)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003024{
Al Viroe9d15932015-04-06 22:44:11 -04003025 struct file *file = iocb->ki_filp;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003026 ssize_t total_written = 0;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003027 struct cifsFileInfo *cfile;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003028 struct cifs_tcon *tcon;
3029 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003030 struct cifs_aio_ctx *ctx;
Al Virofc56b982016-09-21 18:18:23 -04003031 struct iov_iter saved_from = *from;
Long Li8c5f9c12018-10-31 22:13:10 +00003032 size_t len = iov_iter_count(from);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003033 int rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003034
Al Viroe9d15932015-04-06 22:44:11 -04003035 /*
Long Li8c5f9c12018-10-31 22:13:10 +00003036 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
3037 * In this case, fall back to the non-direct write function.
3038 * This could be improved by getting pages directly from the ITER_KVEC.
Al Viroe9d15932015-04-06 22:44:11 -04003039 */
Long Li8c5f9c12018-10-31 22:13:10 +00003040 if (direct && from->type & ITER_KVEC) {
3041 cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
3042 direct = false;
3043 }
Al Viroe9d15932015-04-06 22:44:11 -04003044
Al Viro3309dd02015-04-09 12:55:47 -04003045 rc = generic_write_checks(iocb, from);
3046 if (rc <= 0)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003047 return rc;
3048
Al Viro7119e222014-10-22 00:25:12 -04003049 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003050 cfile = file->private_data;
3051 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003052
3053 if (!tcon->ses->server->ops->async_writev)
3054 return -ENOSYS;
3055
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003056 ctx = cifs_aio_ctx_alloc();
3057 if (!ctx)
3058 return -ENOMEM;
3059
3060 ctx->cfile = cifsFileInfo_get(cfile);
3061
3062 if (!is_sync_kiocb(iocb))
3063 ctx->iocb = iocb;
3064
3065 ctx->pos = iocb->ki_pos;
3066
Long Li8c5f9c12018-10-31 22:13:10 +00003067 if (direct) {
3068 ctx->direct_io = true;
3069 ctx->iter = *from;
3070 ctx->len = len;
3071 } else {
3072 rc = setup_aio_ctx_iter(ctx, from, WRITE);
3073 if (rc) {
3074 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3075 return rc;
3076 }
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003077 }
3078
3079 /* grab a lock here because the response handlers can access ctx */
3080 mutex_lock(&ctx->aio_mutex);
3081
3082 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
3083 cfile, cifs_sb, &ctx->list, ctx);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003084
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003085 /*
3086 * If at least one write was successfully sent, then discard any rc
3087 * value from the later writes. If those writes succeed, we'll end
3088 * up returning whatever was written. If they fail, we'll get a new
3089 * rc value from them.
3090 */
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003091 if (!list_empty(&ctx->list))
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003092 rc = 0;
3093
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003094 mutex_unlock(&ctx->aio_mutex);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003095
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003096 if (rc) {
3097 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3098 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003099 }
3100
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003101 if (!is_sync_kiocb(iocb)) {
3102 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3103 return -EIOCBQUEUED;
3104 }
3105
3106 rc = wait_for_completion_killable(&ctx->done);
3107 if (rc) {
3108 mutex_lock(&ctx->aio_mutex);
3109 ctx->rc = rc = -EINTR;
3110 total_written = ctx->total_len;
3111 mutex_unlock(&ctx->aio_mutex);
3112 } else {
3113 rc = ctx->rc;
3114 total_written = ctx->total_len;
3115 }
3116
3117 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3118
Al Viroe9d15932015-04-06 22:44:11 -04003119 if (unlikely(!total_written))
3120 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003121
Al Viroe9d15932015-04-06 22:44:11 -04003122 iocb->ki_pos += total_written;
Al Viroe9d15932015-04-06 22:44:11 -04003123 return total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003124}
3125
Long Li8c5f9c12018-10-31 22:13:10 +00003126ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3127{
3128 return __cifs_writev(iocb, from, true);
3129}
3130
3131ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3132{
3133 return __cifs_writev(iocb, from, false);
3134}
3135
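/*
 * Cached write used when we hold a write oplock/lease: hold lock_sem
 * while checking that no mandatory brlock conflicts with the range,
 * then go through the generic page-cache write path.
 */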
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003136static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04003137cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003138{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003139 struct file *file = iocb->ki_filp;
3140 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
3141 struct inode *inode = file->f_mapping->host;
3142 struct cifsInodeInfo *cinode = CIFS_I(inode);
3143 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04003144 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003145
Rabin Vincent966681c2017-06-29 16:01:42 +02003146 inode_lock(inode);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003147 /*
3148 * We need to hold the sem to be sure nobody modifies lock list
3149 * with a brlock that prevents writing.
3150 */
3151 down_read(&cinode->lock_sem);
Al Viro5f380c72015-04-07 11:28:12 -04003152
Al Viro3309dd02015-04-09 12:55:47 -04003153 rc = generic_write_checks(iocb, from);
3154 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04003155 goto out;
3156
Al Viro5f380c72015-04-07 11:28:12 -04003157 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003158 server->vals->exclusive_lock_type, 0,
3159 NULL, CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04003160 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04003161 else
3162 rc = -EACCES;
3163out:
Rabin Vincent966681c2017-06-29 16:01:42 +02003164 up_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05003165 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04003166
Christoph Hellwige2592212016-04-07 08:52:01 -07003167 if (rc > 0)
3168 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003169 return rc;
3170}
3171
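/*
 * Top-level strict-cache write: with a write oplock/lease the write
 * goes through the page cache (POSIX-locking mounts can use the
 * generic path directly); otherwise the data is sent uncached, and any
 * read-level cache is zapped so subsequent reads fetch fresh data.
 */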
3172ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04003173cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003174{
Al Viro496ad9a2013-01-23 17:07:38 -05003175 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003176 struct cifsInodeInfo *cinode = CIFS_I(inode);
3177 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3178 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3179 iocb->ki_filp->private_data;
3180 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003181 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003182
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003183 written = cifs_get_writer(cinode);
3184 if (written)
3185 return written;
3186
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003187 if (CIFS_CACHE_WRITE(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003188 if (cap_unix(tcon->ses) &&
3189 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003190 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
Al Viro3dae8752014-04-03 12:05:17 -04003191 written = generic_file_write_iter(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003192 goto out;
3193 }
Al Viro3dae8752014-04-03 12:05:17 -04003194 written = cifs_writev(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003195 goto out;
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04003196 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003197 /*
3198 * For non-oplocked files in strict cache mode we need to write the data
3199 * to the server exactly from the pos to pos+len-1 rather than flush all
3200 * affected pages because it may cause an error with mandatory locks on
3201 * these pages but not on the region from pos to pos+len-1.
3202 */
Al Viro3dae8752014-04-03 12:05:17 -04003203 written = cifs_user_writev(iocb, from);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003204 if (CIFS_CACHE_READ(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003205 /*
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003206 * We have read level caching and we have just sent a write
3207 * request to the server thus making data in the cache stale.
3208 * Zap the cache and set oplock/lease level to NONE to avoid
3209 * reading stale data from the cache. All subsequent read
3210 * operations will read new data from the server.
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003211 */
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003212 cifs_zap_mapping(inode);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003213 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05003214 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003215 cinode->oplock = 0;
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003216 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003217out:
3218 cifs_put_writer(cinode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003219 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003220}
3221
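/*
 * Allocate a cifs_readdata around a caller-supplied page array (the
 * direct I/O path pins user pages itself); cifs_readdata_alloc() below
 * is the variant that allocates the page array as well.
 */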
Jeff Layton0471ca32012-05-16 07:13:16 -04003222static struct cifs_readdata *
Long Lif9f5aca2018-05-30 12:47:54 -07003223cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04003224{
3225 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003226
Long Lif9f5aca2018-05-30 12:47:54 -07003227 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04003228 if (rdata != NULL) {
Long Lif9f5aca2018-05-30 12:47:54 -07003229 rdata->pages = pages;
Jeff Layton6993f742012-05-16 07:13:17 -04003230 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003231 INIT_LIST_HEAD(&rdata->list);
3232 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04003233 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04003234 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003235
Jeff Layton0471ca32012-05-16 07:13:16 -04003236 return rdata;
3237}
3238
Long Lif9f5aca2018-05-30 12:47:54 -07003239static struct cifs_readdata *
3240cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3241{
3242 struct page **pages =
Kees Cook6396bb22018-06-12 14:03:40 -07003243 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
Long Lif9f5aca2018-05-30 12:47:54 -07003244 struct cifs_readdata *ret = NULL;
3245
3246 if (pages) {
3247 ret = cifs_readdata_direct_alloc(pages, complete);
3248 if (!ret)
3249 kfree(pages);
3250 }
3251
3252 return ret;
3253}
3254
Jeff Layton6993f742012-05-16 07:13:17 -04003255void
3256cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04003257{
Jeff Layton6993f742012-05-16 07:13:17 -04003258 struct cifs_readdata *rdata = container_of(refcount,
3259 struct cifs_readdata, refcount);
Long Libd3dcc62017-11-22 17:38:47 -07003260#ifdef CONFIG_CIFS_SMB_DIRECT
3261 if (rdata->mr) {
3262 smbd_deregister_mr(rdata->mr);
3263 rdata->mr = NULL;
3264 }
3265#endif
Jeff Layton6993f742012-05-16 07:13:17 -04003266 if (rdata->cfile)
3267 cifsFileInfo_put(rdata->cfile);
3268
Long Lif9f5aca2018-05-30 12:47:54 -07003269 kvfree(rdata->pages);
Jeff Layton0471ca32012-05-16 07:13:16 -04003270 kfree(rdata);
3271}
3272
Jeff Layton2a1bb132012-05-16 07:13:17 -04003273static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003274cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04003275{
3276 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003277 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04003278 unsigned int i;
3279
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003280 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04003281 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3282 if (!page) {
3283 rc = -ENOMEM;
3284 break;
3285 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003286 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04003287 }
3288
3289 if (rc) {
Roberto Bergantinos Corpas31fad7d2019-05-28 09:38:14 +02003290 unsigned int nr_page_failed = i;
3291
3292 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003293 put_page(rdata->pages[i]);
3294 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04003295 }
3296 }
3297 return rc;
3298}
3299
3300static void
3301cifs_uncached_readdata_release(struct kref *refcount)
3302{
Jeff Layton1c892542012-05-16 07:13:17 -04003303 struct cifs_readdata *rdata = container_of(refcount,
3304 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003305 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003306
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003307 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003308 for (i = 0; i < rdata->nr_pages; i++) {
3309 put_page(rdata->pages[i]);
Jeff Layton1c892542012-05-16 07:13:17 -04003310 }
3311 cifs_readdata_release(refcount);
3312}
3313
Jeff Layton1c892542012-05-16 07:13:17 -04003314/**
3315 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3316 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05003317 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04003318 *
3319 * This function copies data from a list of pages in a readdata response into
3320 * an array of iovecs. It will first calculate where the data should go
3321 * based on the info in the readdata and then copy the data into that spot.
3322 */
Al Viro7f25bba2014-02-04 14:07:43 -05003323static int
3324cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04003325{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04003326 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003327 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003328
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003329 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003330 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02003331 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003332 size_t written;
3333
David Howells00e23702018-10-22 13:07:28 +01003334 if (unlikely(iov_iter_is_pipe(iter))) {
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003335 void *addr = kmap_atomic(page);
3336
3337 written = copy_to_iter(addr, copy, iter);
3338 kunmap_atomic(addr);
3339 } else
3340 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05003341 remaining -= written;
3342 if (written < copy && iov_iter_count(iter) > 0)
3343 break;
Jeff Layton1c892542012-05-16 07:13:17 -04003344 }
Al Viro7f25bba2014-02-04 14:07:43 -05003345 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04003346}
3347
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003348static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3349
Jeff Layton1c892542012-05-16 07:13:17 -04003350static void
3351cifs_uncached_readv_complete(struct work_struct *work)
3352{
3353 struct cifs_readdata *rdata = container_of(work,
3354 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04003355
3356 complete(&rdata->done);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003357 collect_uncached_read_data(rdata->ctx);
3358 /* the call below may free the last reference to the aio ctx */
Jeff Layton1c892542012-05-16 07:13:17 -04003359 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3360}
3361
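/*
 * Fill rdata's pages with up to len bytes of read response data, either
 * straight from the socket or, when the payload was already received
 * (e.g. decrypted into a buffer), from the supplied iter. Pages beyond
 * the returned data are released and tailsz is trimmed to the final
 * partial page.
 */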
3362static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003363uncached_fill_pages(struct TCP_Server_Info *server,
3364 struct cifs_readdata *rdata, struct iov_iter *iter,
3365 unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04003366{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003367 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003368 unsigned int i;
3369 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003370 unsigned int page_offset = rdata->page_offset;
Jeff Layton1c892542012-05-16 07:13:17 -04003371
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003372 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07003373 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003374 for (i = 0; i < nr_pages; i++) {
3375 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05003376 size_t n;
Long Li1dbe3462018-05-30 12:47:55 -07003377 unsigned int segment_size = rdata->pagesz;
3378
3379 if (i == 0)
3380 segment_size -= page_offset;
3381 else
3382 page_offset = 0;
3383
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003384
Al Viro71335662016-01-09 19:54:50 -05003385 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04003386 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003387 rdata->pages[i] = NULL;
3388 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04003389 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07003390 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003391 }
Long Li1dbe3462018-05-30 12:47:55 -07003392
Al Viro71335662016-01-09 19:54:50 -05003393 n = len;
Long Li1dbe3462018-05-30 12:47:55 -07003394 if (len >= segment_size)
Al Viro71335662016-01-09 19:54:50 -05003395 /* enough data to fill the page */
Long Li1dbe3462018-05-30 12:47:55 -07003396 n = segment_size;
3397 else
Al Viro71335662016-01-09 19:54:50 -05003398 rdata->tailsz = len;
Long Li1dbe3462018-05-30 12:47:55 -07003399 len -= n;
3400
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003401 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003402 result = copy_page_from_iter(
3403 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003404#ifdef CONFIG_CIFS_SMB_DIRECT
3405 else if (rdata->mr)
3406 result = n;
3407#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003408 else
Long Li1dbe3462018-05-30 12:47:55 -07003409 result = cifs_read_page_from_socket(
3410 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003411 if (result < 0)
3412 break;
3413
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003414 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003415 }
3416
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003417 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3418 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003419}
3420
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003421static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003422cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3423 struct cifs_readdata *rdata, unsigned int len)
3424{
3425 return uncached_fill_pages(server, rdata, NULL, len);
3426}
3427
3428static int
3429cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3430 struct cifs_readdata *rdata,
3431 struct iov_iter *iter)
3432{
3433 return uncached_fill_pages(server, rdata, iter, iter->count);
3434}
3435
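/*
 * Read-side counterpart of cifs_resend_wdata(): reopen a stale handle,
 * wait for enough credits to resend the whole rdata as one request,
 * and reissue the async read for as long as the result is -EAGAIN.
 */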
Long Li6e6e2b82018-10-31 22:13:09 +00003436static int cifs_resend_rdata(struct cifs_readdata *rdata,
3437 struct list_head *rdata_list,
3438 struct cifs_aio_ctx *ctx)
3439{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003440 unsigned int rsize;
3441 struct cifs_credits credits;
Long Li6e6e2b82018-10-31 22:13:09 +00003442 int rc;
3443 struct TCP_Server_Info *server =
3444 tlink_tcon(rdata->cfile->tlink)->ses->server;
3445
Long Li6e6e2b82018-10-31 22:13:09 +00003446 do {
Long Li0b0dfd52019-03-15 07:55:00 +00003447 if (rdata->cfile->invalidHandle) {
3448 rc = cifs_reopen_file(rdata->cfile, true);
3449 if (rc == -EAGAIN)
3450 continue;
3451 else if (rc)
3452 break;
3453 }
3454
3455 /*
3456 * Wait for credits to resend this rdata.
3457 * Note: we are attempting to resend the whole rdata, not in
3458 * segments.
3459 */
3460 do {
3461 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
Long Li6e6e2b82018-10-31 22:13:09 +00003462 &rsize, &credits);
3463
Long Li0b0dfd52019-03-15 07:55:00 +00003464 if (rc)
3465 goto fail;
Long Li6e6e2b82018-10-31 22:13:09 +00003466
Long Li0b0dfd52019-03-15 07:55:00 +00003467 if (rsize < rdata->bytes) {
3468 add_credits_and_wake_if(server, &credits, 0);
3469 msleep(1000);
3470 }
3471 } while (rsize < rdata->bytes);
3472 rdata->credits = credits;
3473
3474 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3475 if (!rc) {
3476 if (rdata->cfile->invalidHandle)
3477 rc = -EAGAIN;
3478 else
3479 rc = server->ops->async_readv(rdata);
Long Li6e6e2b82018-10-31 22:13:09 +00003480 }
Long Li6e6e2b82018-10-31 22:13:09 +00003481
Long Li0b0dfd52019-03-15 07:55:00 +00003482 /* If the read was successfully sent, we are done */
3483 if (!rc) {
3484 /* Add to aio pending list */
3485 list_add_tail(&rdata->list, rdata_list);
3486 return 0;
3487 }
Long Li6e6e2b82018-10-31 22:13:09 +00003488
Long Li0b0dfd52019-03-15 07:55:00 +00003489 /* Roll back credits and retry if needed */
3490 add_credits_and_wake_if(server, &rdata->credits, 0);
3491 } while (rc == -EAGAIN);
Long Li6e6e2b82018-10-31 22:13:09 +00003492
Long Li0b0dfd52019-03-15 07:55:00 +00003493fail:
3494 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Long Li6e6e2b82018-10-31 22:13:09 +00003495 return rc;
3496}
3497
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003498static int
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003499cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003500 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3501 struct cifs_aio_ctx *ctx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003503 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003504 unsigned int npages, rsize;
3505 struct cifs_credits credits_on_stack;
3506 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003507 size_t cur_len;
3508 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003509 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003510 struct TCP_Server_Info *server;
Long Li6e6e2b82018-10-31 22:13:09 +00003511 struct page **pagevec;
3512 size_t start;
3513 struct iov_iter direct_iov = ctx->iter;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003514
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003515 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07003516
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003517 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3518 pid = open_file->pid;
3519 else
3520 pid = current->tgid;
3521
Long Li6e6e2b82018-10-31 22:13:09 +00003522 if (ctx->direct_io)
3523 iov_iter_advance(&direct_iov, offset - ctx->pos);
3524
Jeff Layton1c892542012-05-16 07:13:17 -04003525 do {
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003526 if (open_file->invalidHandle) {
3527 rc = cifs_reopen_file(open_file, true);
3528 if (rc == -EAGAIN)
3529 continue;
3530 else if (rc)
3531 break;
3532 }
3533
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003534 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003535 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003536 if (rc)
3537 break;
3538
3539 cur_len = min_t(const size_t, len, rsize);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003540
Long Li6e6e2b82018-10-31 22:13:09 +00003541 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05003542 ssize_t result;
Long Li6e6e2b82018-10-31 22:13:09 +00003543
Steve Frenchb98e26d2018-11-01 10:54:32 -05003544 result = iov_iter_get_pages_alloc(
Long Li6e6e2b82018-10-31 22:13:09 +00003545 &direct_iov, &pagevec,
3546 cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05003547 if (result < 0) {
Long Li6e6e2b82018-10-31 22:13:09 +00003548 cifs_dbg(VFS,
Long Li54e94ff2018-12-16 22:41:07 +00003549 "couldn't get user pages (rc=%zd)"
Long Li6e6e2b82018-10-31 22:13:09 +00003550 " iter type %d"
3551 " iov_offset %zd count %zd\n",
Steve Frenchb98e26d2018-11-01 10:54:32 -05003552 result, direct_iov.type,
Long Li6e6e2b82018-10-31 22:13:09 +00003553 direct_iov.iov_offset,
3554 direct_iov.count);
3555 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00003556
3557 rc = result;
3558 add_credits_and_wake_if(server, credits, 0);
Long Li6e6e2b82018-10-31 22:13:09 +00003559 break;
3560 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05003561 cur_len = (size_t)result;
Long Li6e6e2b82018-10-31 22:13:09 +00003562 iov_iter_advance(&direct_iov, cur_len);
3563
3564 rdata = cifs_readdata_direct_alloc(
3565 pagevec, cifs_uncached_readv_complete);
3566 if (!rdata) {
3567 add_credits_and_wake_if(server, credits, 0);
3568 rc = -ENOMEM;
3569 break;
3570 }
3571
3572 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
3573 rdata->page_offset = start;
3574 rdata->tailsz = npages > 1 ?
3575 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
3576 cur_len;
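			/*
			 * Worked example of the math above, with assumed
			 * numbers: PAGE_SIZE 4096, start 1024, cur_len 10240.
			 * Then npages = (10240+1024+4095)/4096 = 3; page 0
			 * holds 4096-1024 = 3072 bytes, page 1 holds 4096,
			 * and tailsz = 10240-3072-4096 = 3072 bytes in the
			 * last page.
			 */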
3577
3578 } else {
3579
3580 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3581 /* allocate a readdata struct */
3582 rdata = cifs_readdata_alloc(npages,
Jeff Layton1c892542012-05-16 07:13:17 -04003583 cifs_uncached_readv_complete);
Long Li6e6e2b82018-10-31 22:13:09 +00003584 if (!rdata) {
3585 add_credits_and_wake_if(server, credits, 0);
3586 rc = -ENOMEM;
3587 break;
3588 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003589
Long Li6e6e2b82018-10-31 22:13:09 +00003590 rc = cifs_read_allocate_pages(rdata, npages);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08003591 if (rc) {
3592 kvfree(rdata->pages);
3593 kfree(rdata);
3594 add_credits_and_wake_if(server, credits, 0);
3595 break;
3596 }
Long Li6e6e2b82018-10-31 22:13:09 +00003597
3598 rdata->tailsz = PAGE_SIZE;
3599 }
Jeff Layton1c892542012-05-16 07:13:17 -04003600
3601 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003602 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003603 rdata->offset = offset;
3604 rdata->bytes = cur_len;
3605 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003606 rdata->pagesz = PAGE_SIZE;
3607 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003608 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003609 rdata->credits = credits_on_stack;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003610 rdata->ctx = ctx;
3611 kref_get(&ctx->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003612
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003613 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3614
3615 if (!rc) {
3616 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003617 rc = -EAGAIN;
3618 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003619 rc = server->ops->async_readv(rdata);
3620 }
3621
Jeff Layton1c892542012-05-16 07:13:17 -04003622 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003623 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003624 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003625 cifs_uncached_readdata_release);
3626 if (rc == -EAGAIN) {
3627 iov_iter_revert(&direct_iov, cur_len);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003628 continue;
Long Li6e6e2b82018-10-31 22:13:09 +00003629 }
Jeff Layton1c892542012-05-16 07:13:17 -04003630 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631 }
Jeff Layton1c892542012-05-16 07:13:17 -04003632
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003633 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003634 offset += cur_len;
3635 len -= cur_len;
3636 } while (len > 0);
3637
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003638 return rc;
3639}
3640
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003641static void
3642collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003643{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003644 struct cifs_readdata *rdata, *tmp;
3645 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003646 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003647 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003648
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003649 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003650
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003651 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003652
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003653 if (list_empty(&ctx->list)) {
3654 mutex_unlock(&ctx->aio_mutex);
3655 return;
3656 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003657
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003658 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003659 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003660again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003661 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003662 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003663 if (!try_wait_for_completion(&rdata->done)) {
3664 mutex_unlock(&ctx->aio_mutex);
3665 return;
3666 }
3667
3668 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003669 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003670 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003671 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003672
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003673 list_del_init(&rdata->list);
3674 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003675
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003676 /*
3677				 * Got part of the data and then a reconnect
3678				 * happened -- fill the buffer and continue
3679				 * reading.
3680 */
3681 if (got_bytes && got_bytes < rdata->bytes) {
Long Li6e6e2b82018-10-31 22:13:09 +00003682 rc = 0;
3683 if (!ctx->direct_io)
3684 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003685 if (rc) {
3686 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003687 cifs_uncached_readdata_release);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003688 continue;
3689 }
3690 }
3691
Long Li6e6e2b82018-10-31 22:13:09 +00003692 if (ctx->direct_io) {
3693 /*
3694 * Re-use rdata as this is a
3695 * direct I/O
3696 */
3697 rc = cifs_resend_rdata(
3698 rdata,
3699 &tmp_list, ctx);
3700 } else {
3701 rc = cifs_send_async_read(
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003702 rdata->offset + got_bytes,
3703 rdata->bytes - got_bytes,
3704 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003705 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003706
Long Li6e6e2b82018-10-31 22:13:09 +00003707 kref_put(&rdata->refcount,
3708 cifs_uncached_readdata_release);
3709 }
3710
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003711 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003712
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003713 goto again;
3714 } else if (rdata->result)
3715 rc = rdata->result;
Long Li6e6e2b82018-10-31 22:13:09 +00003716 else if (!ctx->direct_io)
Jeff Layton1c892542012-05-16 07:13:17 -04003717 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003718
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003719 /* if there was a short read -- discard anything left */
3720 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3721 rc = -ENODATA;
Long Li6e6e2b82018-10-31 22:13:09 +00003722
3723 ctx->total_len += rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003724 }
3725 list_del_init(&rdata->list);
3726 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003728
Jérôme Glisse13f59382019-04-10 15:37:47 -04003729 if (!ctx->direct_io)
Long Li6e6e2b82018-10-31 22:13:09 +00003730 ctx->total_len = ctx->len - iov_iter_count(to);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003731
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003732 /* mask nodata case */
3733 if (rc == -ENODATA)
3734 rc = 0;
3735
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003736 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3737
3738 mutex_unlock(&ctx->aio_mutex);
3739
3740 if (ctx->iocb && ctx->iocb->ki_complete)
3741 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3742 else
3743 complete(&ctx->done);
3744}
3745
Long Li6e6e2b82018-10-31 22:13:09 +00003746static ssize_t __cifs_readv(
3747 struct kiocb *iocb, struct iov_iter *to, bool direct)
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003748{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003749 size_t len;
Long Li6e6e2b82018-10-31 22:13:09 +00003750 struct file *file = iocb->ki_filp;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003751 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003752 struct cifsFileInfo *cfile;
Long Li6e6e2b82018-10-31 22:13:09 +00003753 struct cifs_tcon *tcon;
3754 ssize_t rc, total_read = 0;
3755 loff_t offset = iocb->ki_pos;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003756 struct cifs_aio_ctx *ctx;
3757
Long Li6e6e2b82018-10-31 22:13:09 +00003758 /*
3759	 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
3760	 * so fall back to the data copy read path. This could be
3761	 * improved by getting the pages directly from an ITER_KVEC.
3762 */
3763 if (direct && to->type & ITER_KVEC) {
3764 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3765 direct = false;
3766 }
3767
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003768 len = iov_iter_count(to);
3769 if (!len)
3770 return 0;
3771
3772 cifs_sb = CIFS_FILE_SB(file);
3773 cfile = file->private_data;
3774 tcon = tlink_tcon(cfile->tlink);
3775
3776 if (!tcon->ses->server->ops->async_readv)
3777 return -ENOSYS;
3778
3779 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3780 cifs_dbg(FYI, "attempting read on write only file instance\n");
3781
3782 ctx = cifs_aio_ctx_alloc();
3783 if (!ctx)
3784 return -ENOMEM;
3785
3786 ctx->cfile = cifsFileInfo_get(cfile);
3787
3788 if (!is_sync_kiocb(iocb))
3789 ctx->iocb = iocb;
3790
David Howells00e23702018-10-22 13:07:28 +01003791 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003792 ctx->should_dirty = true;
3793
Long Li6e6e2b82018-10-31 22:13:09 +00003794 if (direct) {
3795 ctx->pos = offset;
3796 ctx->direct_io = true;
3797 ctx->iter = *to;
3798 ctx->len = len;
3799 } else {
3800 rc = setup_aio_ctx_iter(ctx, to, READ);
3801 if (rc) {
3802 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3803 return rc;
3804 }
3805 len = ctx->len;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003806 }
3807
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003808	/* grab a lock here because read response handlers can access ctx */
3809 mutex_lock(&ctx->aio_mutex);
3810
3811 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3812
3813	/* if at least one read request was sent successfully, reset rc */
3814 if (!list_empty(&ctx->list))
3815 rc = 0;
3816
3817 mutex_unlock(&ctx->aio_mutex);
3818
3819 if (rc) {
3820 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3821 return rc;
3822 }
3823
3824 if (!is_sync_kiocb(iocb)) {
3825 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3826 return -EIOCBQUEUED;
3827 }
3828
3829 rc = wait_for_completion_killable(&ctx->done);
3830 if (rc) {
3831 mutex_lock(&ctx->aio_mutex);
3832 ctx->rc = rc = -EINTR;
3833 total_read = ctx->total_len;
3834 mutex_unlock(&ctx->aio_mutex);
3835 } else {
3836 rc = ctx->rc;
3837 total_read = ctx->total_len;
3838 }
3839
3840 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3841
Al Viro0165e812014-02-04 14:19:48 -05003842 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003843 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003844 return total_read;
3845 }
3846 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003847}
3848
Long Li6e6e2b82018-10-31 22:13:09 +00003849ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3850{
3851 return __cifs_readv(iocb, to, true);
3852}
3853
3854ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3855{
3856 return __cifs_readv(iocb, to, false);
3857}
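/*
 * Illustrative sketch only: wrappers like the two above are the entry
 * points that get wired into the VFS via file_operations. The table
 * below is hypothetical (the real tables live in cifsfs.c and may
 * differ); it just shows the shape of the hookup:
 *
 *	const struct file_operations example_direct_fops = {
 *		.read_iter	= cifs_direct_readv,
 *		...
 *	};
 */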
3858
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003859ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003860cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003861{
Al Viro496ad9a2013-01-23 17:07:38 -05003862 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003863 struct cifsInodeInfo *cinode = CIFS_I(inode);
3864 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3865 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3866 iocb->ki_filp->private_data;
3867 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3868 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003869
3870 /*
3871	 * In strict cache mode we need to read from the server all the time
3872	 * if we don't have a level II oplock because the server can delay mtime
3873	 * change - so we can't make a decision about invalidating the inode.
3874	 * We can also fail with page reading if there are mandatory locks
3875	 * on pages affected by this read but not on the region from pos to
3876	 * pos+len-1.
3877 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003878 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003879 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003880
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003881 if (cap_unix(tcon->ses) &&
3882 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3883 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003884 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003885
3886 /*
3887 * We need to hold the sem to be sure nobody modifies lock list
3888 * with a brlock that prevents reading.
3889 */
3890 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003891 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003892 tcon->ses->server->vals->shared_lock_type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003893 0, NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003894 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003895 up_read(&cinode->lock_sem);
3896 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003897}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003899static ssize_t
3900cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901{
3902 int rc = -EACCES;
3903 unsigned int bytes_read = 0;
3904 unsigned int total_read;
3905 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003906 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003908 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003909 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003910 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003911 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003913 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003914 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003915 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003917 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003918 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003919
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003920 /* FIXME: set up handlers for larger reads and/or convert to async */
3921 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3922
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303924 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003925 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303926 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003928 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003929 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003930 server = tcon->ses->server;
3931
3932 if (!server->ops->sync_read) {
3933 free_xid(xid);
3934 return -ENOSYS;
3935 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003937 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3938 pid = open_file->pid;
3939 else
3940 pid = current->tgid;
3941
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003943 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003945 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3946 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003947 do {
3948 current_read_size = min_t(uint, read_size - total_read,
3949 rsize);
3950 /*
3951			 * For Windows ME and 9x we do not want to request more
3952			 * than it negotiated, since it will refuse the read
3953			 * then.
3954 */
3955 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003956 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003957 current_read_size = min_t(uint,
3958 current_read_size, CIFSMaxBufSize);
3959 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003960 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003961 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 if (rc != 0)
3963 break;
3964 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003965 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003966 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003967 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003968 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003969 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003970 &bytes_read, &cur_offset,
3971 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003972 } while (rc == -EAGAIN);
3973
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 if (rc || (bytes_read == 0)) {
3975 if (total_read) {
3976 break;
3977 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003978 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 return rc;
3980 }
3981 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003982 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003983 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 }
3985 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003986 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003987 return total_read;
3988}
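/*
 * Illustrative example of the read loop above, with assumed numbers:
 * for read_size 40960 and a negotiated rsize of 16384, cifs_read()
 * issues three synchronous reads of 16384, 16384 and 8192 bytes,
 * advancing *offset and cur_offset after each one, and returns
 * total_read = 40960 on success.
 */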
3989
Jeff Laytonca83ce32011-04-12 09:13:44 -04003990/*
3991 * If the page is mmap'ed into a process' page tables, then we need to make
3992 * sure that it doesn't change while being written back.
3993 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05303994static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08003995cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003996{
3997 struct page *page = vmf->page;
3998
3999 lock_page(page);
4000 return VM_FAULT_LOCKED;
4001}
4002
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004003static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04004004 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07004005 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04004006 .page_mkwrite = cifs_page_mkwrite,
4007};
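/*
 * Note on the vm_ops above: cifs_page_mkwrite() only needs to lock the
 * page and report VM_FAULT_LOCKED; dirtying the page and the eventual
 * writeback are handled by the generic fault code and cifs_writepages(),
 * so no CIFS-specific work is needed at fault time.
 */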
4008
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004009int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4010{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004011 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05004012 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004013
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004014 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004015
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004016 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004017 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004018 if (!rc)
4019 rc = generic_file_mmap(file, vma);
4020 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004021 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004022
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004023 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004024 return rc;
4025}
4026
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4028{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029 int rc, xid;
4030
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004031 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004032
Jeff Laytonabab0952010-02-12 07:44:18 -05004033 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004034 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004035 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4036 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004037 if (!rc)
4038 rc = generic_file_mmap(file, vma);
4039 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004040 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004041
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004042 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043 return rc;
4044}
4045
Jeff Layton0471ca32012-05-16 07:13:16 -04004046static void
4047cifs_readv_complete(struct work_struct *work)
4048{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004049 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04004050 struct cifs_readdata *rdata = container_of(work,
4051 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04004052
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004053 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004054 for (i = 0; i < rdata->nr_pages; i++) {
4055 struct page *page = rdata->pages[i];
4056
Jeff Layton0471ca32012-05-16 07:13:16 -04004057 lru_cache_add_file(page);
4058
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004059 if (rdata->result == 0 ||
4060 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04004061 flush_dcache_page(page);
4062 SetPageUptodate(page);
4063 }
4064
4065 unlock_page(page);
4066
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004067 if (rdata->result == 0 ||
4068 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04004069 cifs_readpage_to_fscache(rdata->mapping->host, page);
4070
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004071 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004072
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004073 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004074 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04004075 }
Jeff Layton6993f742012-05-16 07:13:17 -04004076 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04004077}
4078
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004079static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004080readpages_fill_pages(struct TCP_Server_Info *server,
4081 struct cifs_readdata *rdata, struct iov_iter *iter,
4082 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004083{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004084 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004085 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004086 u64 eof;
4087 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004088 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07004089 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004090
4091 /* determine the eof that the server (probably) has */
4092 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004093 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05004094 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004095
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004096 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004097 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004098 for (i = 0; i < nr_pages; i++) {
4099 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07004100 unsigned int to_read = rdata->pagesz;
4101 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004102
Long Li1dbe3462018-05-30 12:47:55 -07004103 if (i == 0)
4104 to_read -= page_offset;
4105 else
4106 page_offset = 0;
4107
4108 n = to_read;
4109
4110 if (len >= to_read) {
4111 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07004112 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004113 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07004114 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05004115 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07004116 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004117 } else if (page->index > eof_index) {
4118 /*
4119 * The VFS will not try to do readahead past the
4120 * i_size, but it's possible that we have outstanding
4121 * writes with gaps in the middle and the i_size hasn't
4122 * caught up yet. Populate those with zeroed out pages
4123 * to prevent the VFS from repeatedly attempting to
4124 * fill them until the writes are flushed.
4125 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004126 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004127 lru_cache_add_file(page);
4128 flush_dcache_page(page);
4129 SetPageUptodate(page);
4130 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004131 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004132 rdata->pages[i] = NULL;
4133 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004134 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004135 } else {
4136 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004137 lru_cache_add_file(page);
4138 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004139 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004140 rdata->pages[i] = NULL;
4141 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004142 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004143 }
Jeff Layton8321fec2012-09-19 06:22:32 -07004144
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004145 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07004146 result = copy_page_from_iter(
4147 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07004148#ifdef CONFIG_CIFS_SMB_DIRECT
4149 else if (rdata->mr)
4150 result = n;
4151#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004152 else
Long Li1dbe3462018-05-30 12:47:55 -07004153 result = cifs_read_page_from_socket(
4154 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07004155 if (result < 0)
4156 break;
4157
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004158 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004159 }
4160
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004161 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4162 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004163}
4164
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004165static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004166cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4167 struct cifs_readdata *rdata, unsigned int len)
4168{
4169 return readpages_fill_pages(server, rdata, NULL, len);
4170}
4171
4172static int
4173cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4174 struct cifs_readdata *rdata,
4175 struct iov_iter *iter)
4176{
4177 return readpages_fill_pages(server, rdata, iter, iter->count);
4178}
4179
4180static int
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004181readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
4182 unsigned int rsize, struct list_head *tmplist,
4183 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
4184{
4185 struct page *page, *tpage;
4186 unsigned int expected_index;
4187 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07004188 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004189
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004190 INIT_LIST_HEAD(tmplist);
4191
Nikolay Borisovf86196e2019-01-03 15:29:02 -08004192 page = lru_to_page(page_list);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004193
4194 /*
4195 * Lock the page and put it in the cache. Since no one else
4196 * should have access to this page, we're safe to simply set
4197 * PG_locked without checking it first.
4198 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004199 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004200 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07004201 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004202
4203 /* give up if we can't stick it in the cache */
4204 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004205 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004206 return rc;
4207 }
4208
4209 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004210 *offset = (loff_t)page->index << PAGE_SHIFT;
4211 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004212 *nr_pages = 1;
4213 list_move_tail(&page->lru, tmplist);
4214
4215 /* now try and add more pages onto the request */
4216 expected_index = page->index + 1;
4217 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4218 /* discontinuity ? */
4219 if (page->index != expected_index)
4220 break;
4221
4222 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004223 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004224 break;
4225
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004226 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07004227 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004228 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004229 break;
4230 }
4231 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004232 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004233 expected_index++;
4234 (*nr_pages)++;
4235 }
4236 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237}
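/*
 * Illustrative example of the batching above, with assumed numbers:
 * for PAGE_SIZE 4096 and rsize 16384, readpages_get_pages() takes the
 * first page plus up to three more contiguous-index pages from
 * page_list (4 * 4096 == 16384), stopping early on an index gap or a
 * page-cache insertion failure.
 */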
4238
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239static int cifs_readpages(struct file *file, struct address_space *mapping,
4240 struct list_head *page_list, unsigned num_pages)
4241{
Jeff Layton690c5e32011-10-19 15:30:16 -04004242 int rc;
4243 struct list_head tmplist;
4244 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04004245 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004246 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004247 pid_t pid;
Steve French0cb012d2018-10-11 01:01:02 -05004248 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249
Steve French0cb012d2018-10-11 01:01:02 -05004250 xid = get_xid();
Jeff Layton690c5e32011-10-19 15:30:16 -04004251 /*
Suresh Jayaraman566982362010-07-05 18:13:25 +05304252 * Reads as many pages as possible from fscache. Returns -ENOBUFS
4253	 * immediately if the cookie is negative.
David Howells54afa992013-09-04 17:10:39 +00004254 *
4255 * After this point, every page in the list might have PG_fscache set,
4256 * so we will need to clean that up off of every page we don't use.
Suresh Jayaraman566982362010-07-05 18:13:25 +05304257 */
4258 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4259 &num_pages);
Steve French0cb012d2018-10-11 01:01:02 -05004260 if (rc == 0) {
4261 free_xid(xid);
Jeff Layton690c5e32011-10-19 15:30:16 -04004262 return rc;
Steve French0cb012d2018-10-11 01:01:02 -05004263 }
Suresh Jayaraman566982362010-07-05 18:13:25 +05304264
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004265 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4266 pid = open_file->pid;
4267 else
4268 pid = current->tgid;
4269
Jeff Layton690c5e32011-10-19 15:30:16 -04004270 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004271 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272
Joe Perchesf96637b2013-05-04 22:12:25 -05004273 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4274 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04004275
4276 /*
4277	 * Start with the page at the end of the list and move it to a
4278	 * private list. Do the same with any following pages until we hit
4279 * the rsize limit, hit an index discontinuity, or run out of
4280 * pages. Issue the async read and then start the loop again
4281 * until the list is empty.
4282 *
4283 * Note that list order is important. The page_list is in
4284 * the order of declining indexes. When we put the pages in
4285 * the rdata->pages, then we want them in increasing order.
4286 */
4287 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004288 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04004289 loff_t offset;
4290 struct page *page, *tpage;
4291 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004292 struct cifs_credits credits_on_stack;
4293 struct cifs_credits *credits = &credits_on_stack;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004295 if (open_file->invalidHandle) {
4296 rc = cifs_reopen_file(open_file, true);
4297 if (rc == -EAGAIN)
4298 continue;
4299 else if (rc)
4300 break;
4301 }
4302
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004303 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004304 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004305 if (rc)
4306 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307
Jeff Layton690c5e32011-10-19 15:30:16 -04004308 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004309 * Give up immediately if rsize is too small to read an entire
4310 * page. The VFS will fall back to readpage. We should never
4311 * reach this point however since we set ra_pages to 0 when the
4312 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04004313 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004314 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004315 add_credits_and_wake_if(server, credits, 0);
Steve French0cb012d2018-10-11 01:01:02 -05004316 free_xid(xid);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004317 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004320 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
4321 &nr_pages, &offset, &bytes);
4322 if (rc) {
4323 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04004325 }
4326
Jeff Layton0471ca32012-05-16 07:13:16 -04004327 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04004328 if (!rdata) {
4329 /* best to give up if we're out of mem */
4330 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4331 list_del(&page->lru);
4332 lru_cache_add_file(page);
4333 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004334 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04004335 }
4336 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004337 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04004338 break;
4339 }
4340
Jeff Layton6993f742012-05-16 07:13:17 -04004341 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04004342 rdata->mapping = mapping;
4343 rdata->offset = offset;
4344 rdata->bytes = bytes;
4345 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004346 rdata->pagesz = PAGE_SIZE;
Long Li1dbe3462018-05-30 12:47:55 -07004347 rdata->tailsz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07004348 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004349 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004350 rdata->credits = credits_on_stack;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004351
4352 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4353 list_del(&page->lru);
4354 rdata->pages[rdata->nr_pages++] = page;
4355 }
Jeff Layton690c5e32011-10-19 15:30:16 -04004356
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004357 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4358
4359 if (!rc) {
4360 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004361 rc = -EAGAIN;
4362 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004363 rc = server->ops->async_readv(rdata);
4364 }
4365
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004366 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004367 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004368 for (i = 0; i < rdata->nr_pages; i++) {
4369 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04004370 lru_cache_add_file(page);
4371 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004372 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04004374 /* Fallback to the readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04004375 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376 break;
4377 }
Jeff Layton6993f742012-05-16 07:13:17 -04004378
4379 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380 }
4381
David Howells54afa992013-09-04 17:10:39 +00004382 /* Any pages that have been shown to fscache but didn't get added to
4383 * the pagecache must be uncached before they get returned to the
4384 * allocator.
4385 */
4386 cifs_fscache_readpages_cancel(mapping->host, page_list);
Steve French0cb012d2018-10-11 01:01:02 -05004387 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388 return rc;
4389}
4390
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01004391/*
4392 * cifs_readpage_worker must be called with the page pinned
4393 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394static int cifs_readpage_worker(struct file *file, struct page *page,
4395 loff_t *poffset)
4396{
4397 char *read_data;
4398 int rc;
4399
Suresh Jayaraman566982362010-07-05 18:13:25 +05304400 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05004401 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304402 if (rc == 0)
4403 goto read_complete;
4404
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405 read_data = kmap(page);
4406	/* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004407
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004408 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004409
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410 if (rc < 0)
4411 goto io_error;
4412 else
Joe Perchesf96637b2013-05-04 22:12:25 -05004413 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004414
Steve French9b9c5be2018-09-22 12:07:06 -05004415 /* we do not want atime to be less than mtime, it broke some apps */
4416 file_inode(file)->i_atime = current_time(file_inode(file));
4417 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4418 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4419 else
4420 file_inode(file)->i_atime = current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004421
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004422 if (PAGE_SIZE > rc)
4423 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424
4425 flush_dcache_page(page);
4426 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304427
4428 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05004429 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304430
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004432
Linus Torvalds1da177e2005-04-16 15:20:36 -07004433io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004434 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004435 unlock_page(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304436
4437read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 return rc;
4439}
4440
4441static int cifs_readpage(struct file *file, struct page *page)
4442{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004443 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004445 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004446
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004447 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448
4449 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304450 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004451 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304452 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453 }
4454
Joe Perchesf96637b2013-05-04 22:12:25 -05004455 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00004456 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457
4458 rc = cifs_readpage_worker(file, page, &offset);
4459
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004460 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461 return rc;
4462}
4463
Steve Frencha403a0a2007-07-26 15:54:16 +00004464static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4465{
4466 struct cifsFileInfo *open_file;
4467
Dave Wysochanskicb248812019-10-03 15:16:27 +10004468 spin_lock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004469 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004470 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Dave Wysochanskicb248812019-10-03 15:16:27 +10004471 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004472 return 1;
4473 }
4474 }
Dave Wysochanskicb248812019-10-03 15:16:27 +10004475 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004476 return 0;
4477}
4478
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479/* We do not want to update the file size from the server for inodes
4480   open for write - to avoid races with writepage extending
4481   the file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004482   refreshing the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483   but this is tricky to do without racing with writebehind
4484   page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004485bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486{
Steve Frencha403a0a2007-07-26 15:54:16 +00004487 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004488 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004489
Steve Frencha403a0a2007-07-26 15:54:16 +00004490 if (is_inode_writable(cifsInode)) {
4491 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004492 struct cifs_sb_info *cifs_sb;
4493
Steve Frenchc32a0b62006-01-12 14:41:28 -08004494 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004495 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004496			/* since there is no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004497			   we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004498 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004499 }
4500
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004501 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004502 return true;
Steve French7ba526312007-02-08 18:14:13 +00004503
Steve French4b18f2a2008-04-29 00:06:05 +00004504 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004505 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004506 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507}
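/*
 * Example of the policy above (illustrative scenario): if a file is
 * open for write and the server reports an end-of-file smaller than
 * what local writebehind has already extended the inode to,
 * is_size_safe_to_change() returns false and the stale server size is
 * ignored instead of truncating i_size.
 */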
4508
Nick Piggind9414772008-09-24 11:32:59 -04004509static int cifs_write_begin(struct file *file, struct address_space *mapping,
4510 loff_t pos, unsigned len, unsigned flags,
4511 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512{
Sachin Prabhu466bd312013-09-13 14:11:57 +01004513 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004514 pgoff_t index = pos >> PAGE_SHIFT;
4515 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004516 loff_t page_start = pos & PAGE_MASK;
4517 loff_t i_size;
4518 struct page *page;
4519 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520
Joe Perchesf96637b2013-05-04 22:12:25 -05004521 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04004522
Sachin Prabhu466bd312013-09-13 14:11:57 +01004523start:
Nick Piggin54566b22009-01-04 12:00:53 -08004524 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004525 if (!page) {
4526 rc = -ENOMEM;
4527 goto out;
4528 }
Nick Piggind9414772008-09-24 11:32:59 -04004529
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004530 if (PageUptodate(page))
4531 goto out;
Steve French8a236262007-03-06 00:31:00 +00004532
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004533 /*
4534 * If we write a full page it will be up to date, no need to read from
4535 * the server. If the write is short, we'll end up doing a sync write
4536 * instead.
4537 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004538 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004539 goto out;
4540
4541 /*
4542 * optimize away the read when we have an oplock, and we're not
4543 * expecting to use any of the data we'd be reading in. That
4544 * is, when the page lies beyond the EOF, or straddles the EOF
4545 * and the write will cover all of the existing data.
4546 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004547 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004548 i_size = i_size_read(mapping->host);
4549 if (page_start >= i_size ||
4550 (offset == 0 && (pos + len) >= i_size)) {
4551 zero_user_segments(page, 0, offset,
4552 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004553 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004554 /*
4555 * PageChecked means that the parts of the page
4556 * to which we're not writing are considered up
4557 * to date. Once the data is copied to the
4558 * page, it can be set uptodate.
4559 */
4560 SetPageChecked(page);
4561 goto out;
4562 }
4563 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564
Sachin Prabhu466bd312013-09-13 14:11:57 +01004565 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004566 /*
4567 * might as well read a page, it is fast enough. If we get
4568 * an error, we don't need to return it. cifs_write_end will
4569 * do a sync write instead since PG_uptodate isn't set.
4570 */
4571 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004572 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004573 oncethru = 1;
4574 goto start;
Steve French8a236262007-03-06 00:31:00 +00004575 } else {
4576		/* we could try using another file handle if there is one -
4577		   but how would we lock it to prevent a close of that handle
4578		   racing with this read? In any case,
Nick Piggind9414772008-09-24 11:32:59 -04004579		   this will be written out by write_end so it is fine */
Steve French8a236262007-03-06 00:31:00 +00004580 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004581out:
4582 *pagep = page;
4583 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584}
4585
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304586static int cifs_release_page(struct page *page, gfp_t gfp)
4587{
4588 if (PagePrivate(page))
4589 return 0;
4590
4591 return cifs_fscache_release_page(page, gfp);
4592}
4593
Lukas Czernerd47992f2013-05-21 23:17:23 -04004594static void cifs_invalidate_page(struct page *page, unsigned int offset,
4595 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304596{
4597 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4598
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004599 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304600 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4601}
4602
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004603static int cifs_launder_page(struct page *page)
4604{
4605 int rc = 0;
4606 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004607 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004608 struct writeback_control wbc = {
4609 .sync_mode = WB_SYNC_ALL,
4610 .nr_to_write = 0,
4611 .range_start = range_start,
4612 .range_end = range_end,
4613 };
4614
Joe Perchesf96637b2013-05-04 22:12:25 -05004615 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004616
4617 if (clear_page_dirty_for_io(page))
4618 rc = cifs_writepage_locked(page, &wbc);
4619
4620 cifs_fscache_invalidate_page(page, page->mapping->host);
4621 return rc;
4622}
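/*
 * Note on cifs_launder_page() above: ->launder_page is called (e.g.
 * from invalidate_inode_pages2) when a dirty, locked page must be
 * written out synchronously before it can be dropped; the one-page
 * WB_SYNC_ALL writeback_control above expresses exactly that, and the
 * fscache copy of the page is invalidated afterwards.
 */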
4623
Tejun Heo9b646972010-07-20 22:09:02 +02004624void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04004625{
4626 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4627 oplock_break);
David Howells2b0143b2015-03-17 22:25:59 +00004628 struct inode *inode = d_inode(cfile->dentry);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004629 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004630 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004631 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004632 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04004633
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004634 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
NeilBrown74316202014-07-07 15:16:04 +10004635 TASK_UNINTERRUPTIBLE);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004636
4637 server->ops->downgrade_oplock(server, cinode,
4638 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
4639
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004640 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004641 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05004642 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4643 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004644 cinode->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004645 }
4646
Jeff Layton3bc303c2009-09-21 06:47:50 -04004647 if (inode && S_ISREG(inode->i_mode)) {
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004648 if (CIFS_CACHE_READ(cinode))
Al Viro8737c932009-12-24 06:47:55 -05004649 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00004650 else
Al Viro8737c932009-12-24 06:47:55 -05004651 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004652 rc = filemap_fdatawrite(inode->i_mapping);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004653 if (!CIFS_CACHE_READ(cinode)) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004654 rc = filemap_fdatawait(inode->i_mapping);
4655 mapping_set_error(inode->i_mapping, rc);
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004656 cifs_zap_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004657 }
Joe Perchesf96637b2013-05-04 22:12:25 -05004658 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004659 }
4660
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004661 rc = cifs_push_locks(cfile);
4662 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004663 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004664
Jeff Layton3bc303c2009-09-21 06:47:50 -04004665	/*
4666	 * Releasing a stale oplock after a recent reconnect of the SMB session
4667	 * using a now-incorrect file handle is not a data integrity issue, but
4668	 * do not bother sending an oplock release if the session to the server
4669	 * is still disconnected, since the server has already released the oplock.
4670	 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00004671 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004672 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4673 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05004674 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004675 }
Aurelien Aptelb98749c2019-03-29 10:49:12 +01004676	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004677 cifs_done_oplock_break(cinode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004678}
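
/*
 * For reference, a sketch (not a verbatim copy of the call sites) of
 * how this handler is wired up: it is initialized at open time and
 * queued from the demultiplex path when an oplock break PDU arrives,
 * roughly
 *
 *	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
 *	queue_work(cifsoplockd_wq, &cfile->oplock_break);
 *
 * so the break is handled and acknowledged off the response path.
 */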
4679
Steve Frenchdca69282013-11-11 16:42:37 -06004680/*
4681 * The presence of cifs_direct_io() in the address space ops vector
4682	 * allows open() with the O_DIRECT flag, which would have failed otherwise.
4683 *
4684 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4685 * so this method should never be called.
4686 *
4687 * Direct IO is not yet supported in the cached mode.
4688 */
4689static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004690cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004691{
4692 /*
4693 * FIXME
4694	 * Eventually we need to support direct IO for non-forcedirectio mounts
4695 */
4696 return -EINVAL;
4697}
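
/*
 * An illustration (hypothetical path) of what the stub above enables:
 * the generic open path returns -EINVAL for O_DIRECT when the inode's
 * address_space lacks a ->direct_IO method, so without cifs_direct_io()
 * the following would fail on a cached CIFS mount:
 *
 *	int fd = open("/mnt/cifs/file", O_RDWR | O_DIRECT);
 *
 * With the stub in place the open succeeds and I/O simply goes through
 * the page cache.
 */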
4698
4699
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004700const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004701 .readpage = cifs_readpage,
4702 .readpages = cifs_readpages,
4703 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07004704 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004705 .write_begin = cifs_write_begin,
4706 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004707 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304708 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06004709 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304710 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004711 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004713
4714/*
4715 * cifs_readpages requires the server to support a buffer large enough to
4716 * contain the header plus one complete page of data. Otherwise, we need
4717 * to leave cifs_readpages out of the address space operations.
4718 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004719const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004720 .readpage = cifs_readpage,
4721 .writepage = cifs_writepage,
4722 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004723 .write_begin = cifs_write_begin,
4724 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004725 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304726 .releasepage = cifs_release_page,
4727 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004728 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004729};
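
/*
 * Which table an inode uses is chosen when its ops are set up (see
 * cifs_set_ops() in inode.c); roughly, if the server's negotiated
 * buffer cannot hold a header plus a full page of data, readpages is
 * left out:
 *
 *	if (server->maxBuf < PAGE_SIZE + MAX_CIFS_HDR_SIZE)
 *		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *	else
 *		inode->i_data.a_ops = &cifs_addr_ops;
 */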