blob: dcdbcb6f09f825c978545188c3f98309bb6e7202 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Long Libd3dcc62017-11-22 17:38:47 -070045#include "smbdirect.h"
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
/*
 * Open a file via the SMB1 POSIX extensions (CIFSPOSIXCreate) and, on
 * success, optionally fill in or refresh the matching inode from the
 * FILE_UNIX_BASIC_INFO returned by the server.
 *
 * @full_path: path of the file relative to the share root
 * @pinode:    NULL if the caller does not want inode info; pointer to a
 *             NULL inode to allocate a new one; pointer to an existing
 *             inode to refresh its attributes in place
 * @sb:        superblock of this mount
 * @mode:      create mode; the caller's umask is applied below
 * @f_flags:   POSIX open flags (translated by cifs_posix_convert_flags())
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: network file handle returned by the server
 * @xid:       transaction id of the enclosing vfs operation
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	/* response buffer for the server's file attributes */
	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	/* the tlink reference is only needed for the call above */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server sent no attribute data back */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the existing inode from the returned attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
173
/*
 * Open a file the "NT" (non-POSIX-extensions) way via the dialect's
 * server->ops->open hook, then fetch inode info for the opened path.
 *
 * @full_path: path of the file relative to the share root
 * @inode:     inode of the file being opened (refreshed after open)
 * @cifs_sb:   per-superblock cifs data
 * @tcon:      tree connection to open on
 * @f_flags:   POSIX open flags; mapped to desired access, disposition
 *             and create options below
 * @oplock:    in/out oplock state for the open
 * @fid:       out: file handle information filled in by the open op
 * @xid:       transaction id of the enclosing vfs operation
 *
 * Returns 0 on success or a negative errno (-ENOSYS when the dialect
 * has no open operation).
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer for the FILE_ALL_INFO the open op returns */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* refresh inode metadata now that the open succeeded */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
259
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400260static bool
261cifs_has_mand_locks(struct cifsInodeInfo *cinode)
262{
263 struct cifs_fid_locks *cur;
264 bool has_locks = false;
265
266 down_read(&cinode->lock_sem);
267 list_for_each_entry(cur, &cinode->llist, llist) {
268 if (!list_empty(&cur->locks)) {
269 has_locks = true;
270 break;
271 }
272 }
273 up_read(&cinode->lock_sem);
274 return has_locks;
275}
276
/*
 * Allocate and initialize the per-open-file private data (cifsFileInfo)
 * for @file, link it into the inode's and tcon's open-file lists, and
 * hand the server-granted @fid/@oplock to the dialect via set_fid.
 *
 * Takes a reference on the tlink, the dentry and the superblock; the
 * initial refcount of the returned structure is 1 (dropped by
 * cifsFileInfo_put()). Returns NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-handle byte-range lock list, also chained off the inode */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;	/* initial reference, owned by the caller */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/* pin the superblock while this handle exists */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	/* a lease break may have arrived while the open was in flight */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	/* set_fid() may flip purge_cache back on; checked below */
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	/* zap outside the spinlock: it may block */
	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
351
Jeff Layton764a1b12012-07-25 14:59:54 -0400352struct cifsFileInfo *
353cifsFileInfo_get(struct cifsFileInfo *cifs_file)
354{
Steve French3afca262016-09-22 18:58:16 -0500355 spin_lock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400356 cifsFileInfo_get_locked(cifs_file);
Steve French3afca262016-09-22 18:58:16 -0500357 spin_unlock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400358 return cifs_file;
359}
360
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 *
 * When the last reference drops this unlinks the handle from the inode
 * and tcon lists, cancels any pending oplock-break work, closes the
 * handle on the server, frees all outstanding byte-range lock records,
 * and drops the tlink/dentry/superblock references taken at open time.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	/* drop the reference; bail out if others remain */
	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close  because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	/* must run outside the spinlocks: cancel_work_sync() can sleep */
	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	/* a cancelled break still needs its completion signalled */
	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	/* drop the references taken in cifs_new_fileinfo() */
	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
450
/*
 * VFS ->open for cifs. Tries the SMB1 POSIX-extensions open first when
 * the tcon supports it, otherwise falls back to the NT-style open, then
 * builds the per-handle cifsFileInfo and fscache cookie.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* strict cache + O_DIRECT: switch to the uncached file ops */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server rejected the call itself: stop trying it */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* record the open so a racing lease break is not lost */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server open and the pending-open record */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
577
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400578static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
579
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700580/*
581 * Try to reacquire byte range locks that were released when session
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400582 * to server was lost.
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700583 */
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400584static int
585cifs_relock_file(struct cifsFileInfo *cfile)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586{
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400587 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +0000588 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400589 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590 int rc = 0;
591
Rabin Vincent560d3882017-05-03 17:17:21 +0200592 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400593 if (cinode->can_cache_brlcks) {
Pavel Shilovsky689c3db2013-07-11 11:17:45 +0400594 /* can cache locks - no need to relock */
595 up_read(&cinode->lock_sem);
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400596 return rc;
597 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400599 if (cap_unix(tcon->ses) &&
600 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
601 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
602 rc = cifs_push_posix_locks(cfile);
603 else
604 rc = tcon->ses->server->ops->push_mand_locks(cfile);
605
Pavel Shilovsky689c3db2013-07-11 11:17:45 +0400606 up_read(&cinode->lock_sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 return rc;
608}
609
/*
 * cifs_reopen_file - re-establish the server handle backing @cfile
 * @cfile:	open file whose handle was invalidated (e.g. by reconnect)
 * @can_flush:	true if it is safe to flush dirty pages and refresh the
 *		inode from the server; false when called from a writeback
 *		path where flushing could deadlock
 *
 * Returns 0 on success (or if the handle was already valid) and a
 * negative error code otherwise.  Serialized against other users of the
 * handle via cfile->fh_mutex.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else reopened the handle while we waited */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* prefer the POSIX open path when the server advertises it */
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
767
768int cifs_close(struct inode *inode, struct file *file)
769{
Jeff Layton77970692011-04-05 16:23:47 -0700770 if (file->private_data != NULL) {
771 cifsFileInfo_put(file->private_data);
772 file->private_data = NULL;
773 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774
Steve Frenchcdff08e2010-10-21 22:46:14 +0000775 /* return code from the ->release op is always ignored */
776 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777}
778
Steve French52ace1e2016-09-22 19:23:56 -0500779void
780cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
781{
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700782 struct cifsFileInfo *open_file;
Steve French52ace1e2016-09-22 19:23:56 -0500783 struct list_head *tmp;
784 struct list_head *tmp1;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700785 struct list_head tmp_list;
786
Pavel Shilovsky96a988f2016-11-29 11:31:23 -0800787 if (!tcon->use_persistent || !tcon->need_reopen_files)
788 return;
789
790 tcon->need_reopen_files = false;
791
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700792 cifs_dbg(FYI, "Reopen persistent handles");
793 INIT_LIST_HEAD(&tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500794
795 /* list all files open on tree connection, reopen resilient handles */
796 spin_lock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700797 list_for_each(tmp, &tcon->openFileList) {
Steve French52ace1e2016-09-22 19:23:56 -0500798 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700799 if (!open_file->invalidHandle)
800 continue;
801 cifsFileInfo_get(open_file);
802 list_add_tail(&open_file->rlist, &tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500803 }
804 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700805
806 list_for_each_safe(tmp, tmp1, &tmp_list) {
807 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
Pavel Shilovsky96a988f2016-11-29 11:31:23 -0800808 if (cifs_reopen_file(open_file, false /* do not flush */))
809 tcon->need_reopen_files = true;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700810 list_del_init(&open_file->rlist);
811 cifsFileInfo_put(open_file);
812 }
Steve French52ace1e2016-09-22 19:23:56 -0500813}
814
/*
 * ->release handler for directories: close the search handle on the
 * server if still needed, free the cached search buffer and drop the
 * per-open private data.  Always returns 0 to the VFS (server close
 * errors are logged and ignored).
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	/*
	 * Mark the handle invalid under file_info_lock, then drop the lock
	 * before issuing the (potentially sleeping) server close.
	 */
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* release any SMB response buffer still cached by the search */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
865
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400866static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300867cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000868{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400869 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000870 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400871 if (!lock)
872 return lock;
873 lock->offset = offset;
874 lock->length = length;
875 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400876 lock->pid = current->tgid;
877 INIT_LIST_HEAD(&lock->blist);
878 init_waitqueue_head(&lock->block_q);
879 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400880}
881
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700882void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400883cifs_del_lock_waiters(struct cifsLockInfo *lock)
884{
885 struct cifsLockInfo *li, *tmp;
886 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
887 list_del_init(&li->blist);
888 wake_up(&li->block_q);
889 }
890}
891
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400892#define CIFS_LOCK_OP 0
893#define CIFS_READ_OP 1
894#define CIFS_WRITE_OP 2
895
/* @rw_check : CIFS_LOCK_OP (0) - lock op, CIFS_READ_OP (1) - read,
   CIFS_WRITE_OP (2) - write */
/*
 * Scan one fid's lock list (@fdlocks) for a lock that conflicts with the
 * range [@offset, @offset + @length) of @type requested through @cfile
 * for the operation @rw_check (CIFS_LOCK_OP/CIFS_READ_OP/CIFS_WRITE_OP).
 * On conflict, stores the conflicting lock in *@conf_lock (if non-NULL)
 * and returns true; returns false when no lock in the list conflicts.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* non-overlapping ranges can never conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/*
		 * A shared-type request does not conflict with our own lock
		 * on the same fid, nor with another lock of the same type.
		 */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
927
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700928bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300929cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700930 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400931 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400932{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300933 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700934 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +0000935 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300936
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700937 list_for_each_entry(cur, &cinode->llist, llist) {
938 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700939 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300940 if (rc)
941 break;
942 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300943
944 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400945}
946
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300947/*
948 * Check if there is another lock that prevents us to set the lock (mandatory
949 * style). If such a lock exists, update the flock structure with its
950 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
951 * or leave it the same if we can't. Returns 0 if we don't need to request to
952 * the server or 1 otherwise.
953 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400954static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300955cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
956 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400957{
958 int rc = 0;
959 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +0000960 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300961 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400962 bool exist;
963
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700964 down_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400965
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300966 exist = cifs_find_lock_conflict(cfile, offset, length, type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400967 &conf_lock, CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400968 if (exist) {
969 flock->fl_start = conf_lock->offset;
970 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
971 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300972 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400973 flock->fl_type = F_RDLCK;
974 else
975 flock->fl_type = F_WRLCK;
976 } else if (!cinode->can_cache_brlcks)
977 rc = 1;
978 else
979 flock->fl_type = F_UNLCK;
980
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700981 up_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400982 return rc;
983}
984
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400985static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300986cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400987{
David Howells2b0143b2015-03-17 22:25:59 +0000988 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700989 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700990 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700991 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000992}
993
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed: grant the lock locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blist and sleep
		 * until cifs_del_lock_waiters detaches us (our blist becomes
		 * empty - checked open-coded below), then retry from scratch.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: remove ourselves from the wait list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1040
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001041/*
1042 * Check if there is another lock that prevents us to set the lock (posix
1043 * style). If such a lock exists, update the flock structure with its
1044 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1045 * or leave it the same if we can't. Returns 0 if we don't need to request to
1046 * the server or 1 otherwise.
1047 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001048static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001049cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1050{
1051 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001052 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001053 unsigned char saved_type = flock->fl_type;
1054
Pavel Shilovsky50792762011-10-29 17:17:57 +04001055 if ((flock->fl_flags & FL_POSIX) == 0)
1056 return 1;
1057
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001058 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001059 posix_test_lock(file, flock);
1060
1061 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1062 flock->fl_type = saved_type;
1063 rc = 1;
1064 }
1065
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001066 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001067 return rc;
1068}
1069
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	/* only FL_POSIX locks are handled here */
	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* cannot cache locally - caller must go to the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * The lock is blocked; wait until it is no longer queued
		 * behind another lock, then retry from the top.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		/* interrupted: drop ourselves from the blocked list */
		posix_unblock_lock(flock);
	}
	return rc;
}
1102
/*
 * Push all cached mandatory byte-range locks of @cfile to the server,
 * batching as many LOCKING_ANDX_RANGE entries per request as fit in the
 * server's negotiated buffer.  Exclusive and shared locks are sent in
 * separate passes since each cifs_lockv call carries a single lock type.
 * Returns 0 on success or the last non-zero status from cifs_lockv.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	/* pass 0: exclusive locks, pass 1: shared locks */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	/* how many lock ranges fit in one request after the SMB header */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* send any partially filled final batch */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1175
Jeff Layton3d224622016-05-24 06:27:44 -04001176static __u32
1177hash_lockowner(fl_owner_t owner)
1178{
1179 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1180}
1181
/*
 * Snapshot of one POSIX lock, copied out of struct file_lock so it can
 * be sent to the server after the flc_lock spinlock has been dropped
 * (see cifs_push_posix_locks).
 */
struct lock_to_push {
	struct list_head llist;	/* entry on the local locks_to_send list */
	__u64 offset;		/* start of the locked byte range */
	__u64 length;		/* length of the locked byte range */
	__u32 pid;		/* hashed lock owner (see hash_lockowner) */
	__u16 netfid;		/* SMB file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1190
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001191static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001192cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001193{
David Howells2b0143b2015-03-17 22:25:59 +00001194 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001195 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001196 struct file_lock *flock;
1197 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001198 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001199 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001200 struct list_head locks_to_send, *el;
1201 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001202 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001203
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001204 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001205
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001206 if (!flctx)
1207 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001208
Jeff Laytone084c1b2015-02-16 14:32:03 -05001209 spin_lock(&flctx->flc_lock);
1210 list_for_each(el, &flctx->flc_posix) {
1211 count++;
1212 }
1213 spin_unlock(&flctx->flc_lock);
1214
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001215 INIT_LIST_HEAD(&locks_to_send);
1216
1217 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001218 * Allocating count locks is enough because no FL_POSIX locks can be
1219 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001220 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001221 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001222 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001223 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1224 if (!lck) {
1225 rc = -ENOMEM;
1226 goto err_out;
1227 }
1228 list_add_tail(&lck->llist, &locks_to_send);
1229 }
1230
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001231 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001232 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001233 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001234 if (el == &locks_to_send) {
1235 /*
1236 * The list ended. We don't have enough allocated
1237 * structures - something is really wrong.
1238 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001239 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001240 break;
1241 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001242 length = 1 + flock->fl_end - flock->fl_start;
1243 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1244 type = CIFS_RDLCK;
1245 else
1246 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001247 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001248 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001249 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001250 lck->length = length;
1251 lck->type = type;
1252 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001253 }
Jeff Layton6109c852015-01-16 15:05:57 -05001254 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001255
1256 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001257 int stored_rc;
1258
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001259 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001260 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001261 lck->type, 0);
1262 if (stored_rc)
1263 rc = stored_rc;
1264 list_del(&lck->llist);
1265 kfree(lck);
1266 }
1267
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001268out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001269 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001270 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001271err_out:
1272 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1273 list_del(&lck->llist);
1274 kfree(lck);
1275 }
1276 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001277}
1278
1279static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001280cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001281{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001282 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001283 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001284 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001285 int rc = 0;
1286
1287 /* we are going to update can_cache_brlcks here - need a write access */
1288 down_write(&cinode->lock_sem);
1289 if (!cinode->can_cache_brlcks) {
1290 up_write(&cinode->lock_sem);
1291 return rc;
1292 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001293
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001294 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001295 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1296 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001297 rc = cifs_push_posix_locks(cfile);
1298 else
1299 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001300
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001301 cinode->can_cache_brlcks = false;
1302 up_write(&cinode->lock_sem);
1303 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001304}
1305
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001306static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001307cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001308 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001310 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001311 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001312 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001313 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001314 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001315 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001316 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001318 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001319 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001320 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001321 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001322 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001323 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1324 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001325 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001327 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001328 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001329 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001330 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001331 *lock = 1;
1332 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001333 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001334 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001335 *unlock = 1;
1336 /* Check if unlock includes more than one lock range */
1337 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001338 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001339 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001340 *lock = 1;
1341 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001342 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001343 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001344 *lock = 1;
1345 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001346 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001347 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001348 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001350 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001351}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
/*
 * Implement F_GETLK: test whether the range described by @flock could be
 * locked, and report the result back through @flock.
 *
 * POSIX path: first consult the locally cached POSIX locks via
 * cifs_posix_lock_test(); if that does not resolve the request, query
 * the server with CIFSSMBPosixLock() using @flock as the result buffer.
 *
 * Mandatory path: the range is probed by taking a server lock and
 * immediately releasing it.  flock->fl_type is set to F_UNLCK when the
 * range is free, or to the conflicting lock type otherwise.
 *
 * Returns 0 once @flock describes the result, or a negative error code.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* try to satisfy the request from the local lock cache */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* check locally cached mandatory locks first */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe the server: try to take the lock, then drop it again */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		/* probe succeeded - the range is unlocked */
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	/* a shared probe already failed - report an exclusive conflict */
	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	/* exclusive probe failed - retry as shared to classify the conflict */
	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		/* shared probe succeeded - conflict is a read lock */
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1421
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001422void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001423cifs_move_llist(struct list_head *source, struct list_head *dest)
1424{
1425 struct list_head *li, *tmp;
1426 list_for_each_safe(li, tmp, source)
1427 list_move(li, dest);
1428}
1429
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001430void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001431cifs_free_llist(struct list_head *llist)
1432{
1433 struct cifsLockInfo *li, *tmp;
1434 list_for_each_entry_safe(li, tmp, llist, llist) {
1435 cifs_del_lock_waiters(li);
1436 list_del(&li->llist);
1437 kfree(li);
1438 }
1439}
1440
/*
 * Send unlock requests to the server for every cached lock of @cfile
 * that falls entirely within the range described by @flock.
 *
 * Runs two passes over the file's lock list - one per protocol lock
 * flavor in types[] - batching up to max_num ranges per LOCKING_ANDX
 * request.  Candidate locks are moved to a temporary list first so they
 * can be restored to the file's list if the server rejects the unlock.
 *
 * Returns 0 on success or the last error returned by the server.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* the two protocol lock flavors we may have cached */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	/* number of lock ranges that fit in a single request buffer */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* skip locks not fully contained in the range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			/* only unlock locks owned by this thread group */
			if (current->tgid != li->pid)
				continue;
			/* wrong flavor for this pass */
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* request buffer full - flush this batch */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* flush the final partial batch for this flavor */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1549
/*
 * Implement F_SETLK/F_SETLKW (and unlock): apply the byte-range lock
 * change described by @flock to the server.
 *
 * POSIX path: try to handle the request locally via
 * cifs_posix_lock_set(); a zero or negative return is final, otherwise
 * the lock is sent over the wire with CIFSSMBPosixLock().
 *
 * Mandatory path: allocate a cifsLockInfo, check against locally cached
 * locks (cifs_lock_add_if), send the lock to the server, and add it to
 * the file's lock list on success.  Unlocks go through
 * mand_unlock_range.
 *
 * On success of an FL_POSIX request the VFS lock bookkeeping is updated
 * via locks_lock_file_wait().
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		/* handled locally (or failed) - nothing to send */
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		/* NOTE: shadows the int 'lock' parameter from here on */
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/* rc < 0: error; rc == 0: lock cached locally, done */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted the lock - record it locally */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX && !rc)
		rc = locks_lock_file_wait(file, flock);
	return rc;
}
1630
1631int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1632{
1633 int rc, xid;
1634 int lock = 0, unlock = 0;
1635 bool wait_flag = false;
1636 bool posix_lck = false;
1637 struct cifs_sb_info *cifs_sb;
1638 struct cifs_tcon *tcon;
1639 struct cifsInodeInfo *cinode;
1640 struct cifsFileInfo *cfile;
1641 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001642 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001643
1644 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001645 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001646
Joe Perchesf96637b2013-05-04 22:12:25 -05001647 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1648 cmd, flock->fl_flags, flock->fl_type,
1649 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001650
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001651 cfile = (struct cifsFileInfo *)file->private_data;
1652 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001653
1654 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1655 tcon->ses->server);
1656
Al Viro7119e222014-10-22 00:25:12 -04001657 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001658 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001659 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001660
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001661 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001662 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1663 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1664 posix_lck = true;
1665 /*
1666 * BB add code here to normalize offset and length to account for
1667 * negative length which we can not accept over the wire.
1668 */
1669 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001670 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001671 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001672 return rc;
1673 }
1674
1675 if (!lock && !unlock) {
1676 /*
1677 * if no lock or unlock then nothing to do since we do not
1678 * know what it is
1679 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001680 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001681 return -EOPNOTSUPP;
1682 }
1683
1684 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1685 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001686 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 return rc;
1688}
1689
Jeff Layton597b0272012-03-23 14:40:56 -04001690/*
1691 * update the file size (if needed) after a write. Should be called with
1692 * the inode->i_lock held
1693 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001694void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001695cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1696 unsigned int bytes_written)
1697{
1698 loff_t end_of_write = offset + bytes_written;
1699
1700 if (end_of_write > cifsi->server_eof)
1701 cifsi->server_eof = end_of_write;
1702}
1703
/*
 * Write @write_size bytes from @write_data to @open_file starting at
 * *offset, issuing as many sync_write calls as needed and retrying
 * (reopening an invalidated handle first) while the server returns
 * -EAGAIN.
 *
 * On progress, *offset is advanced past the written data and the cached
 * server EOF / i_size are updated under i_lock.  Returns the number of
 * bytes written, or a negative error if nothing could be written.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	/* this protocol dialect has no synchronous write op */
	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	/* outer loop: one sync_write per iteration until all data is sent */
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		/* inner loop: retry the current chunk while server says EAGAIN */
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* chunk size: bounded by the server's retry size */
			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* report partial progress if any, else the error */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* chunk written - advance offset and cached EOF */
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* extend i_size if the write went past the old end of file */
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1788
/*
 * Find an open handle on @cifs_inode that is usable for reading and
 * take a reference on it.  When @fsuid_only is set (and the mount is
 * multiuser), only handles opened by the current fsuid are considered.
 *
 * Returns a referenced cifsFileInfo (caller must release it) or NULL.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001823
Jeff Layton6508d902010-09-29 19:51:11 -04001824struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1825 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001826{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001827 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001828 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001829 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001830 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001831 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001832 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001833
Steve French60808232006-04-22 15:53:05 +00001834 /* Having a null inode here (because mapping->host was set to zero by
1835 the VFS or MM) should not happen but we had reports of on oops (due to
1836 it being zero) during stress testcases so we need to check for it */
1837
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001838 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001839 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001840 dump_stack();
1841 return NULL;
1842 }
1843
Jeff Laytond3892292010-11-02 16:22:50 -04001844 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001845 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001846
Jeff Layton6508d902010-09-29 19:51:11 -04001847 /* only filter by fsuid on multiuser mounts */
1848 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1849 fsuid_only = false;
1850
Steve French3afca262016-09-22 18:58:16 -05001851 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001852refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001853 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001854 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001855 return NULL;
1856 }
Steve French6148a742005-10-05 12:23:19 -07001857 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001858 if (!any_available && open_file->pid != current->tgid)
1859 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001860 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001861 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001862 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001863 if (!open_file->invalidHandle) {
1864 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001865 cifsFileInfo_get(open_file);
1866 spin_unlock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001867 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001868 } else {
1869 if (!inv_file)
1870 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001871 }
Steve French6148a742005-10-05 12:23:19 -07001872 }
1873 }
Jeff Layton2846d382008-09-22 21:33:33 -04001874 /* couldn't find useable FH with same pid, try any available */
1875 if (!any_available) {
1876 any_available = true;
1877 goto refind_writable;
1878 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001879
1880 if (inv_file) {
1881 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001882 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001883 }
1884
Steve French3afca262016-09-22 18:58:16 -05001885 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001886
1887 if (inv_file) {
1888 rc = cifs_reopen_file(inv_file, false);
1889 if (!rc)
1890 return inv_file;
1891 else {
Steve French3afca262016-09-22 18:58:16 -05001892 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001893 list_move_tail(&inv_file->flist,
1894 &cifs_inode->openFileList);
Steve French3afca262016-09-22 18:58:16 -05001895 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001896 cifsFileInfo_put(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001897 ++refind;
David Disseldorpe1e9bda2015-03-13 14:20:29 +01001898 inv_file = NULL;
Steve French3afca262016-09-22 18:58:16 -05001899 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001900 goto refind_writable;
1901 }
1902 }
1903
Steve French6148a742005-10-05 12:23:19 -07001904 return NULL;
1905}
1906
/*
 * Write the byte range [from, to) of @page back to the server using any
 * available writable open handle for the inode (via find_writable_file).
 *
 * Returns 0 on success (or when racing with truncate makes the write
 * moot), a negative write error, -EIO when no writable handle exists or
 * the range is bogus, and -EFAULT when the page has no mapping/host.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* sanity-check the range; page already mapped, so unmap on exit */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1960
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001961static struct cifs_writedata *
1962wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1963 pgoff_t end, pgoff_t *index,
1964 unsigned int *found_pages)
1965{
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001966 struct cifs_writedata *wdata;
1967
1968 wdata = cifs_writedata_alloc((unsigned int)tofind,
1969 cifs_writev_complete);
1970 if (!wdata)
1971 return NULL;
1972
Jan Kara9c19a9c2017-11-15 17:35:26 -08001973 *found_pages = find_get_pages_range_tag(mapping, index, end,
1974 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001975 return wdata;
1976}
1977
/*
 * Lock and claim a consecutive run of the candidate pages collected in
 * wdata->pages[0..found_pages-1].  Each kept page is locked, revalidated
 * against truncation/remapping, waited on (for WB_SYNC modes), cleared
 * dirty and marked under writeback.  The scan stops at the first page
 * that is non-consecutive, past @end, past EOF, or already under
 * writeback.  Pages not kept are released and NULLed out.
 *
 * Returns the number of pages kept (possibly 0); sets *done when the
 * range end or EOF was reached, and updates *index / *next so the caller
 * can continue or re-find skipped pages.
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */

		/* block on the first page; don't stall on the rest */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2056
/*
 * Fill in the remaining cifs_writedata fields for a run of @nr_pages
 * prepared pages (tail page may be partial, bounded by i_size), attach a
 * writable file handle, and issue the asynchronous write.
 *
 * All pages are unlocked before returning, regardless of outcome.
 * Returns 0 on successful submission, -EBADF when no writable handle
 * exists, or the error from ->async_writev().
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* last page may extend past EOF; only write up to i_size */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	/* drop any stale handle before looking up a fresh one */
	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
2091
/*
 * address_space_operations ->writepages for cifs: gather runs of dirty
 * pages, obtain server write credits sized to the negotiated wsize, and
 * submit them as asynchronous writes.  Falls back to generic_writepages()
 * (one page at a time) when wsize is smaller than a page.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		/* reserve server credits before collecting pages */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* data-integrity sync must retry transient failures */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
/*
 * Write a single (already locked) dirty page back to the server via
 * cifs_partialpagewrite().  The caller retains responsibility for
 * unlocking the page (see cifs_writepage()).  -EAGAIN is retried
 * forever for WB_SYNC_ALL; otherwise the page is redirtied.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN) {
		if (wbc->sync_mode == WB_SYNC_ALL)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2245
/* ->writepage: write the locked page, then drop the page lock */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);

	return rc;
}
2252
/*
 * address_space_operations ->write_end: commit @copied bytes written to
 * @page at @pos.  Uptodate pages are just dirtied for later writeback;
 * non-uptodate pages are written through synchronously via cifs_write()
 * (avoids a read-modify-write of the rest of the page).
 *
 * Returns the number of bytes committed, or a negative error.  Always
 * unlocks and releases the page reference taken by ->write_begin.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	/* PageChecked is set by write_begin when the page was left
	   non-uptodate on purpose; a full copy now makes it uptodate */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* extend cached file size if this write grew the file */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2313
/*
 * ->fsync for strict cache mode: flush dirty pages in [start, end],
 * invalidate the cached range if we no longer hold a read (cache) lease,
 * then ask the server to flush the handle (unless mounted nosssync).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2356
/*
 * ->fsync for non-strict cache modes: flush dirty pages in [start, end]
 * and ask the server to flush the handle (unless mounted nosssync).
 * Unlike cifs_strict_fsync(), the page cache is not invalidated.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2390
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391/*
2392 * As file closes, flush all cached write data for this inode checking
2393 * for write behind errors.
2394 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002395int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396{
Al Viro496ad9a2013-01-23 17:07:38 -05002397 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 int rc = 0;
2399
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002400 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002401 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002402
Joe Perchesf96637b2013-05-04 22:12:25 -05002403 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404
2405 return rc;
2406}
2407
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002408static int
2409cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2410{
2411 int rc = 0;
2412 unsigned long i;
2413
2414 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002415 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002416 if (!pages[i]) {
2417 /*
2418 * save number of pages we have already allocated and
2419 * return with ENOMEM error
2420 */
2421 num_pages = i;
2422 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002423 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002424 }
2425 }
2426
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002427 if (rc) {
2428 for (i = 0; i < num_pages; i++)
2429 put_page(pages[i]);
2430 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002431 return rc;
2432}
2433
2434static inline
2435size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2436{
2437 size_t num_pages;
2438 size_t clen;
2439
2440 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002441 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002442
2443 if (cur_len)
2444 *cur_len = clen;
2445
2446 return num_pages;
2447}
2448
/*
 * kref release callback for uncached write data: drop the aio context
 * reference, release every page in the writedata, then free the
 * writedata itself via the common cifs_writedata_release().
 */
static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}
2461
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002462static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2463
/*
 * Work-queue completion for an uncached (direct) write: update the
 * cached server EOF / inode size under i_lock, signal waiters, collect
 * the result into the aio context, and drop our writedata reference.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2483
/*
 * Copy up to *len bytes of user data from @from into the first
 * *num_pages pages of @wdata.  On return, *len holds the number of
 * bytes actually copied and *num_pages the number of pages used.
 * Returns -EFAULT if nothing could be copied (bogus user address),
 * 0 otherwise.  A short copy is tolerated; see the in-loop comment.
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2525
/*
 * Split an uncached write into wsize-bounded chunks, copy the user data
 * into freshly allocated pages and dispatch each chunk as an async write.
 * Successfully sent wdata structures are queued on @wdata_list; each one
 * holds a reference on @ctx so the aio context outlives the in-flight I/O.
 * Returns 0 or the first non-retryable error.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	/* snapshot of the iterator so an -EAGAIN resend can rewind it */
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	/* with RWPIDFORWARD the pid that opened the file owns the I/O */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		/* blocks until the server grants credits for up to wsize */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			/* return the unused credits to the server pool */
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			/* copy failed entirely -- unwind pages and wdata */
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		/* last page may be partially filled */
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;
		wdata->ctx = ctx;
		/* the in-flight wdata pins the aio context */
		kref_get(&ctx->refcount);

		/* reopen a stale handle before sending, if needed */
		if (!wdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/* rewind the iterator and retry this chunk */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2624
/*
 * Reap completed uncached writes attached to @ctx, resending -EAGAIN
 * chunks, then finish the aio: either call ->ki_complete() for async
 * callers or signal ctx->done for synchronous ones. Runs both from the
 * submitter and from write completion work; aio_mutex serializes them,
 * and hitting a not-yet-done wdata simply returns (a later completion
 * will call back in and continue the reaping).
 */
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	unsigned int i;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	/* an earlier invocation already drained the list and finished */
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			/* not done yet: bail, completion will re-enter */
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				/* reposition the iter at this chunk's data */
				iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);

				list_splice(&tmp_list, &ctx->list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				/* list changed under us; restart the walk */
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	/* release the user pages pinned at submission time */
	for (i = 0; i < ctx->npages; i++)
		put_page(ctx->bv[i].bv_page);

	cifs_stats_bytes_written(tcon, ctx->total_len);
	/* pagecache is now stale relative to what we wrote directly */
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
2705
/*
 * Uncached (O_DIRECT-style) write entry point. Builds a cifs_aio_ctx,
 * copies the user iovec into kernel pages and submits async writes.
 * Async iocbs return -EIOCBQUEUED and complete via ->ki_complete();
 * sync callers block on ctx->done. Returns bytes written or an error.
 */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	/* keep a private copy: setup_aio_ctx_iter consumes *from */
	struct iov_iter saved_from = *from;
	int rc;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	/* remember the iocb only when completion must be delivered async */
	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	/* pin the user pages and build ctx->iter over them */
	rc = setup_aio_ctx_iter(ctx, from, WRITE);
	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	/* async: completions will finish the ctx; drop our submitter ref */
	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* fatal signal: record -EINTR, report what did complete */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}
2797
/*
 * Cached write path for oplocked files. Takes the inode lock, then
 * lock_sem (this order matters), verifies no mandatory brlock conflicts
 * with the target range, and falls through to the generic pagecache
 * write. Returns bytes written, -EACCES on a lock conflict, or another
 * negative error.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	/* generic_write_checks may clamp the count or reject the write */
	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	/* O_SYNC/O_DSYNC flush happens outside the locks */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
2833
/*
 * Strict-cache write entry point: dispatch on the oplock state.
 * With a write (exclusive) oplock we may use the pagecache; otherwise
 * the data is written uncached directly to the server. Also coordinates
 * with oplock-break handling via cifs_get_writer()/cifs_put_writer().
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* blocks while an oplock break is being processed */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		/*
		 * POSIX-capable unix extensions without NOPOSIXBRL handle
		 * byte-range locks server side, so the plain generic path
		 * is safe; otherwise check brlocks via cifs_writev().
		 */
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2881
Jeff Layton0471ca32012-05-16 07:13:16 -04002882static struct cifs_readdata *
Long Lif9f5aca2018-05-30 12:47:54 -07002883cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002884{
2885 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002886
Long Lif9f5aca2018-05-30 12:47:54 -07002887 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002888 if (rdata != NULL) {
Long Lif9f5aca2018-05-30 12:47:54 -07002889 rdata->pages = pages;
Jeff Layton6993f742012-05-16 07:13:17 -04002890 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002891 INIT_LIST_HEAD(&rdata->list);
2892 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002893 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002894 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002895
Jeff Layton0471ca32012-05-16 07:13:16 -04002896 return rdata;
2897}
2898
Long Lif9f5aca2018-05-30 12:47:54 -07002899static struct cifs_readdata *
2900cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
2901{
2902 struct page **pages =
Kees Cook6396bb22018-06-12 14:03:40 -07002903 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
Long Lif9f5aca2018-05-30 12:47:54 -07002904 struct cifs_readdata *ret = NULL;
2905
2906 if (pages) {
2907 ret = cifs_readdata_direct_alloc(pages, complete);
2908 if (!ret)
2909 kfree(pages);
2910 }
2911
2912 return ret;
2913}
2914
/*
 * Final kref release for a cifs_readdata: tear down the SMB Direct
 * memory registration (if any), drop the file reference, and free the
 * page-pointer array and the structure itself. Called via kref_put().
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	/* deregister the RDMA MR before the buffers go away */
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	/* pages array may be kmalloc'd or vmalloc'd; kvfree handles both */
	kvfree(rdata->pages);
	kfree(rdata);
}
2932
Jeff Layton2a1bb132012-05-16 07:13:17 -04002933static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002934cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002935{
2936 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002937 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002938 unsigned int i;
2939
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002940 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002941 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2942 if (!page) {
2943 rc = -ENOMEM;
2944 break;
2945 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002946 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002947 }
2948
2949 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002950 for (i = 0; i < nr_pages; i++) {
2951 put_page(rdata->pages[i]);
2952 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002953 }
2954 }
2955 return rc;
2956}
2957
2958static void
2959cifs_uncached_readdata_release(struct kref *refcount)
2960{
Jeff Layton1c892542012-05-16 07:13:17 -04002961 struct cifs_readdata *rdata = container_of(refcount,
2962 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002963 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002964
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07002965 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002966 for (i = 0; i < rdata->nr_pages; i++) {
2967 put_page(rdata->pages[i]);
2968 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002969 }
2970 cifs_readdata_release(refcount);
2971}
2972
Jeff Layton1c892542012-05-16 07:13:17 -04002973/**
2974 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2975 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05002976 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04002977 *
2978 * This function copies data from a list of pages in a readdata response into
2979 * an array of iovecs. It will first calculate where the data should go
2980 * based on the info in the readdata and then copy the data into that spot.
2981 */
Al Viro7f25bba2014-02-04 14:07:43 -05002982static int
2983cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04002984{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04002985 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002986 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002987
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002988 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002989 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02002990 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovsky9c257022017-01-19 13:53:15 -08002991 size_t written;
2992
David Howells00e23702018-10-22 13:07:28 +01002993 if (unlikely(iov_iter_is_pipe(iter))) {
Pavel Shilovsky9c257022017-01-19 13:53:15 -08002994 void *addr = kmap_atomic(page);
2995
2996 written = copy_to_iter(addr, copy, iter);
2997 kunmap_atomic(addr);
2998 } else
2999 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05003000 remaining -= written;
3001 if (written < copy && iov_iter_count(iter) > 0)
3002 break;
Jeff Layton1c892542012-05-16 07:13:17 -04003003 }
Al Viro7f25bba2014-02-04 14:07:43 -05003004 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04003005}
3006
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003007static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3008
Jeff Layton1c892542012-05-16 07:13:17 -04003009static void
3010cifs_uncached_readv_complete(struct work_struct *work)
3011{
3012 struct cifs_readdata *rdata = container_of(work,
3013 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04003014
3015 complete(&rdata->done);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003016 collect_uncached_read_data(rdata->ctx);
3017 /* the below call can possibly free the last ref to aio ctx */
Jeff Layton1c892542012-05-16 07:13:17 -04003018 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3019}
3020
3021static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003022uncached_fill_pages(struct TCP_Server_Info *server,
3023 struct cifs_readdata *rdata, struct iov_iter *iter,
3024 unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04003025{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003026 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003027 unsigned int i;
3028 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003029 unsigned int page_offset = rdata->page_offset;
Jeff Layton1c892542012-05-16 07:13:17 -04003030
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003031 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07003032 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003033 for (i = 0; i < nr_pages; i++) {
3034 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05003035 size_t n;
Long Li1dbe3462018-05-30 12:47:55 -07003036 unsigned int segment_size = rdata->pagesz;
3037
3038 if (i == 0)
3039 segment_size -= page_offset;
3040 else
3041 page_offset = 0;
3042
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003043
Al Viro71335662016-01-09 19:54:50 -05003044 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04003045 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003046 rdata->pages[i] = NULL;
3047 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04003048 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07003049 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003050 }
Long Li1dbe3462018-05-30 12:47:55 -07003051
Al Viro71335662016-01-09 19:54:50 -05003052 n = len;
Long Li1dbe3462018-05-30 12:47:55 -07003053 if (len >= segment_size)
Al Viro71335662016-01-09 19:54:50 -05003054 /* enough data to fill the page */
Long Li1dbe3462018-05-30 12:47:55 -07003055 n = segment_size;
3056 else
Al Viro71335662016-01-09 19:54:50 -05003057 rdata->tailsz = len;
Long Li1dbe3462018-05-30 12:47:55 -07003058 len -= n;
3059
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003060 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003061 result = copy_page_from_iter(
3062 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003063#ifdef CONFIG_CIFS_SMB_DIRECT
3064 else if (rdata->mr)
3065 result = n;
3066#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003067 else
Long Li1dbe3462018-05-30 12:47:55 -07003068 result = cifs_read_page_from_socket(
3069 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003070 if (result < 0)
3071 break;
3072
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003073 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003074 }
3075
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003076 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3077 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003078}
3079
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003080static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003081cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3082 struct cifs_readdata *rdata, unsigned int len)
3083{
3084 return uncached_fill_pages(server, rdata, NULL, len);
3085}
3086
/* Iter-fill variant: copy the whole of @iter into the rdata pages. */
static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata,
			      struct iov_iter *iter)
{
	return uncached_fill_pages(server, rdata, iter, iter->count);
}
3094
/*
 * Split an uncached read of @len bytes at @offset into rsize-bounded
 * chunks and dispatch each as an async read. Each successfully sent
 * rdata is queued on @rdata_list and holds a reference on @ctx.
 * -EAGAIN from a send retries the same chunk; other errors stop the
 * loop. Returns 0 or the first non-retryable error.
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	/* with RWPIDFORWARD the pid that opened the file owns the I/O */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		/* blocks until the server grants credits for up to rsize */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/* return the unused credits to the server pool */
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->tailsz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
		rdata->credits = credits;
		rdata->ctx = ctx;
		/* the in-flight rdata pins the aio context */
		kref_get(&ctx->refcount);

		/* reopen a stale handle before sending, if needed */
		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3169
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003170static void
3171collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003172{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003173 struct cifs_readdata *rdata, *tmp;
3174 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003175 struct cifs_sb_info *cifs_sb;
3176 struct cifs_tcon *tcon;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003177 unsigned int i;
3178 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003179
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003180 tcon = tlink_tcon(ctx->cfile->tlink);
3181 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003182
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003183 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003184
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003185 if (list_empty(&ctx->list)) {
3186 mutex_unlock(&ctx->aio_mutex);
3187 return;
3188 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003189
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003190 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003191 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003192again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003193 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003194 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003195 if (!try_wait_for_completion(&rdata->done)) {
3196 mutex_unlock(&ctx->aio_mutex);
3197 return;
3198 }
3199
3200 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003201 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003202 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003203 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003204
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003205 list_del_init(&rdata->list);
3206 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003207
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003208 /*
3209 * Got a part of data and then reconnect has
3210 * happened -- fill the buffer and continue
3211 * reading.
3212 */
3213 if (got_bytes && got_bytes < rdata->bytes) {
3214 rc = cifs_readdata_to_iov(rdata, to);
3215 if (rc) {
3216 kref_put(&rdata->refcount,
3217 cifs_uncached_readdata_release);
3218 continue;
3219 }
3220 }
3221
3222 rc = cifs_send_async_read(
3223 rdata->offset + got_bytes,
3224 rdata->bytes - got_bytes,
3225 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003226 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003227
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003228 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003229
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003230 kref_put(&rdata->refcount,
3231 cifs_uncached_readdata_release);
3232 goto again;
3233 } else if (rdata->result)
3234 rc = rdata->result;
3235 else
Jeff Layton1c892542012-05-16 07:13:17 -04003236 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003237
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003238 /* if there was a short read -- discard anything left */
3239 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3240 rc = -ENODATA;
Jeff Layton1c892542012-05-16 07:13:17 -04003241 }
3242 list_del_init(&rdata->list);
3243 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003245
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003246 for (i = 0; i < ctx->npages; i++) {
3247 if (ctx->should_dirty)
3248 set_page_dirty(ctx->bv[i].bv_page);
3249 put_page(ctx->bv[i].bv_page);
3250 }
Al Viro7f25bba2014-02-04 14:07:43 -05003251
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003252 ctx->total_len = ctx->len - iov_iter_count(to);
3253
3254 cifs_stats_bytes_read(tcon, ctx->total_len);
Jeff Layton1c892542012-05-16 07:13:17 -04003255
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003256 /* mask nodata case */
3257 if (rc == -ENODATA)
3258 rc = 0;
3259
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003260 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3261
3262 mutex_unlock(&ctx->aio_mutex);
3263
3264 if (ctx->iocb && ctx->iocb->ki_complete)
3265 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3266 else
3267 complete(&ctx->done);
3268}
3269
3270ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3271{
3272 struct file *file = iocb->ki_filp;
3273 ssize_t rc;
3274 size_t len;
3275 ssize_t total_read = 0;
3276 loff_t offset = iocb->ki_pos;
3277 struct cifs_sb_info *cifs_sb;
3278 struct cifs_tcon *tcon;
3279 struct cifsFileInfo *cfile;
3280 struct cifs_aio_ctx *ctx;
3281
3282 len = iov_iter_count(to);
3283 if (!len)
3284 return 0;
3285
3286 cifs_sb = CIFS_FILE_SB(file);
3287 cfile = file->private_data;
3288 tcon = tlink_tcon(cfile->tlink);
3289
3290 if (!tcon->ses->server->ops->async_readv)
3291 return -ENOSYS;
3292
3293 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3294 cifs_dbg(FYI, "attempting read on write only file instance\n");
3295
3296 ctx = cifs_aio_ctx_alloc();
3297 if (!ctx)
3298 return -ENOMEM;
3299
3300 ctx->cfile = cifsFileInfo_get(cfile);
3301
3302 if (!is_sync_kiocb(iocb))
3303 ctx->iocb = iocb;
3304
David Howells00e23702018-10-22 13:07:28 +01003305 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003306 ctx->should_dirty = true;
3307
3308 rc = setup_aio_ctx_iter(ctx, to, READ);
3309 if (rc) {
3310 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3311 return rc;
3312 }
3313
3314 len = ctx->len;
3315
3316 /* grab a lock here due to read response handlers can access ctx */
3317 mutex_lock(&ctx->aio_mutex);
3318
3319 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3320
3321 /* if at least one read request send succeeded, then reset rc */
3322 if (!list_empty(&ctx->list))
3323 rc = 0;
3324
3325 mutex_unlock(&ctx->aio_mutex);
3326
3327 if (rc) {
3328 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3329 return rc;
3330 }
3331
3332 if (!is_sync_kiocb(iocb)) {
3333 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3334 return -EIOCBQUEUED;
3335 }
3336
3337 rc = wait_for_completion_killable(&ctx->done);
3338 if (rc) {
3339 mutex_lock(&ctx->aio_mutex);
3340 ctx->rc = rc = -EINTR;
3341 total_read = ctx->total_len;
3342 mutex_unlock(&ctx->aio_mutex);
3343 } else {
3344 rc = ctx->rc;
3345 total_read = ctx->total_len;
3346 }
3347
3348 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3349
Al Viro0165e812014-02-04 14:19:48 -05003350 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003351 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003352 return total_read;
3353 }
3354 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003355}
3356
/*
 * cifs_strict_readv - read entry point for "strict cache" mounts.
 *
 * Uses the generic page-cache read path only when we hold a read
 * (level II) oplock and no mandatory byte-range lock conflicts with
 * the requested region; otherwise the data is fetched from the server
 * via cifs_user_readv().
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					   iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	/*
	 * With POSIX (unix extension) locking semantics the server arbitrates
	 * brlocks, so the cached path can be used without a conflict check.
	 */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396
/*
 * cifs_read - synchronous, rsize-chunked read used by the readpage path.
 *
 * Reads up to @read_size bytes at *@offset into @read_data using the
 * server's ->sync_read op, advancing *@offset by the bytes transferred.
 * Returns the total bytes read, or a negative error if nothing was read.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	/* honour pid forwarding for lock-owner semantics when mounted so */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	/* issue rsize-sized chunks; inner loop retries -EAGAIN (reconnect) */
	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			/* error or EOF: return what we have, if anything */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): stats are bumped with the cumulative
			 * total_read rather than this chunk's bytes_read,
			 * which looks like it overcounts on multi-chunk
			 * reads — verify against the stats consumers.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3487
Jeff Laytonca83ce32011-04-12 09:13:44 -04003488/*
3489 * If the page is mmap'ed into a process' page tables, then we need to make
3490 * sure that it doesn't change while being written back.
3491 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05303492static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08003493cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003494{
3495 struct page *page = vmf->page;
3496
3497 lock_page(page);
3498 return VM_FAULT_LOCKED;
3499}
3500
/*
 * mmap handlers for CIFS files: generic fault paths plus a page_mkwrite
 * that just locks the page before a shared-writable store.
 */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3506
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003507int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3508{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003509 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003510 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003511
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003512 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003513
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003514 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003515 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003516 if (!rc)
3517 rc = generic_file_mmap(file, vma);
3518 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003519 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003520
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003521 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003522 return rc;
3523}
3524
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3526{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 int rc, xid;
3528
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003529 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003530
Jeff Laytonabab0952010-02-12 07:44:18 -05003531 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003532 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003533 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3534 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003535 if (!rc)
3536 rc = generic_file_mmap(file, vma);
3537 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003538 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003539
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003540 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541 return rc;
3542}
3543
/*
 * cifs_readv_complete - workqueue completion for a readpages request.
 *
 * For each page: add it to the LRU, mark it uptodate if the read
 * succeeded (or got data before a reconnect-triggered -EAGAIN), push
 * good pages to fscache, then unlock and release the page reference.
 * Finally drops the rdata reference taken when the request was issued.
 */
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i, got_bytes;
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	got_bytes = rdata->got_bytes;
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes)) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes))
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		/* consume up to a page's worth of the received byte count */
		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);

		put_page(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
3576
/*
 * readpages_fill_pages - land @len bytes of response data in rdata->pages.
 *
 * Data is copied from @iter when non-NULL (cached/compounded responses),
 * otherwise read straight from the server socket; with SMB Direct the
 * RDMA transfer already filled the pages, so the count is just accepted.
 * Pages past the received length are zero-filled, marked uptodate (when
 * beyond the server's EOF), or released outright.
 */
static int
readpages_fill_pages(struct TCP_Server_Info *server,
		     struct cifs_readdata *rdata, struct iov_iter *iter,
		     unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		unsigned int to_read = rdata->pagesz;
		size_t n;

		/* only the first page may start at a non-zero offset */
		if (i == 0)
			to_read -= page_offset;
		else
			page_offset = 0;

		n = to_read;

		if (len >= to_read) {
			len -= to_read;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len + page_offset, to_read - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* report bytes received unless the connection aborted mid-read */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3662
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003663static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003664cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3665 struct cifs_readdata *rdata, unsigned int len)
3666{
3667 return readpages_fill_pages(server, rdata, NULL, len);
3668}
3669
3670static int
3671cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
3672 struct cifs_readdata *rdata,
3673 struct iov_iter *iter)
3674{
3675 return readpages_fill_pages(server, rdata, iter, iter->count);
3676}
3677
/*
 * readpages_get_pages - carve one contiguous batch off @page_list.
 *
 * Moves the last page of @page_list (lowest index, since the list is in
 * declining index order) plus any immediately following pages onto
 * @tmplist, locking each and inserting it into the page cache, until an
 * index gap, the @rsize limit, or an insertion failure stops the batch.
 * Outputs the batch's byte offset, byte count, and page count.
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
3736
/*
 * cifs_readpages - ->readpages address-space op (readahead).
 *
 * Tries fscache first, then repeatedly carves a contiguous, credit- and
 * rsize-bounded batch of pages off @page_list and issues an async read
 * for it; cifs_readv_complete() finishes each batch. On any per-batch
 * failure the batch's pages are unlocked and released and the loop stops.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* honour pid forwarding for lock-owner semantics when mounted so */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		/* blocks until the server grants credits for this request */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->tailsz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
		rdata->credits = credits;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		/* reopen a stale handle before issuing the async read */
		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our ref; the response handler holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}
3867
/*
 * cifs_readpage_worker must be called with the page pinned
 *
 * Fills @page at *@poffset: tries fscache first, otherwise does a
 * synchronous cifs_read() into the kmapped page, zero-fills any tail,
 * and marks the page uptodate. The page is unlocked on both the success
 * and I/O-error paths of the network read; on a fscache hit we return
 * with the page state as cifs_readpage_from_fscache left it.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	/* read counts as an access for atime purposes */
	file_inode(file)->i_atime =
		current_time(file_inode(file));

	/* zero the tail on a short read so stale data never leaks */
	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
3913
3914static int cifs_readpage(struct file *file, struct page *page)
3915{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003916 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003918 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003919
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003920 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921
3922 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303923 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003924 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303925 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926 }
3927
Joe Perchesf96637b2013-05-04 22:12:25 -05003928 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003929 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930
3931 rc = cifs_readpage_worker(file, page, &offset);
3932
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003933 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 return rc;
3935}
3936
Steve Frencha403a0a2007-07-26 15:54:16 +00003937static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3938{
3939 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05003940 struct cifs_tcon *tcon =
3941 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00003942
Steve French3afca262016-09-22 18:58:16 -05003943 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003944 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003945 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05003946 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003947 return 1;
3948 }
3949 }
Steve French3afca262016-09-22 18:58:16 -05003950 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003951 return 0;
3952}
3953
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954/* We do not want to update the file size from server for inodes
3955 open for write - to avoid races with writepage extending
3956 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003957 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958 but this is tricky to do without racing with writebehind
3959 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003960bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961{
Steve Frencha403a0a2007-07-26 15:54:16 +00003962 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003963 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003964
Steve Frencha403a0a2007-07-26 15:54:16 +00003965 if (is_inode_writable(cifsInode)) {
3966 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003967 struct cifs_sb_info *cifs_sb;
3968
Steve Frenchc32a0b62006-01-12 14:41:28 -08003969 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003970 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003971 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003972 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003973 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003974 }
3975
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003976 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003977 return true;
Steve French7ba526312007-02-08 18:14:13 +00003978
Steve French4b18f2a2008-04-29 00:06:05 +00003979 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003980 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003981 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982}
3983
/*
 * ->write_begin for the cifs address space: locate and lock the page
 * covering a write of @len bytes at @pos, reading or zeroing it first
 * when the write will not make the whole page uptodate.  On return
 * *pagep holds the locked, pinned page that cifs_write_end() completes.
 *
 * Returns 0 on success or -ENOMEM if the page cannot be obtained.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;	/* set after one read attempt so we don't loop */
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			/* zero the parts of the page the write won't cover */
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 *
		 * cifs_readpage_worker() unlocks the page, so drop our
		 * reference and retry the lookup to come back with it
		 * locked again; oncethru guarantees a single retry.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
4060
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304061static int cifs_release_page(struct page *page, gfp_t gfp)
4062{
4063 if (PagePrivate(page))
4064 return 0;
4065
4066 return cifs_fscache_release_page(page, gfp);
4067}
4068
Lukas Czernerd47992f2013-05-21 23:17:23 -04004069static void cifs_invalidate_page(struct page *page, unsigned int offset,
4070 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304071{
4072 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4073
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004074 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304075 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4076}
4077
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004078static int cifs_launder_page(struct page *page)
4079{
4080 int rc = 0;
4081 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004082 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004083 struct writeback_control wbc = {
4084 .sync_mode = WB_SYNC_ALL,
4085 .nr_to_write = 0,
4086 .range_start = range_start,
4087 .range_end = range_end,
4088 };
4089
Joe Perchesf96637b2013-05-04 22:12:25 -05004090 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004091
4092 if (clear_page_dirty_for_io(page))
4093 rc = cifs_writepage_locked(page, &wbc);
4094
4095 cifs_fscache_invalidate_page(page, page->mapping->host);
4096 return rc;
4097}
4098
/*
 * Work handler run when the server breaks an oplock/lease on a file.
 * Waits for in-flight writers, downgrades the cached oplock state,
 * breaks any local lease, flushes (and if read caching is lost,
 * invalidates) the page cache, pushes byte-range locks to the server,
 * and finally acknowledges the break unless it was cancelled.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* Let pending writers drain before changing the oplock state. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/*
	 * Mandatory locks require full cache coherency; a read-only
	 * (level II) oplock is not enough, so drop the oplock entirely.
	 */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* Break any local lease to match the new caching state. */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			/* Lost read caching: wait for the flush, then
			   invalidate the whole mapping. */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	cifs_done_oplock_break(cinode);
}
4153
Steve Frenchdca69282013-11-11 16:42:37 -06004154/*
4155 * The presence of cifs_direct_io() in the address space ops vector
4156 * allowes open() O_DIRECT flags which would have failed otherwise.
4157 *
4158 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4159 * so this method should never be called.
4160 *
4161 * Direct IO is not yet supported in the cached mode.
4162 */
4163static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004164cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004165{
4166 /*
4167 * FIXME
4168 * Eventually need to support direct IO for non forcedirectio mounts
4169 */
4170 return -EINVAL;
4171}
4172
4173
/*
 * Address space operations for the normal case, where the server's
 * buffer is large enough for cifs_readpages (compare the smallbuf
 * variant below, which omits .readpages).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,	/* stub: enables O_DIRECT opens */
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004187
4188/*
4189 * cifs_readpages requires the server to support a buffer large enough to
4190 * contain the header plus one complete page of data. Otherwise, we need
4191 * to leave cifs_readpages out of the address space operations.
4192 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004193const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004194 .readpage = cifs_readpage,
4195 .writepage = cifs_writepage,
4196 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004197 .write_begin = cifs_write_begin,
4198 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004199 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304200 .releasepage = cifs_release_page,
4201 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004202 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004203};