/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"

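/*
 * Map the POSIX access mode bits (O_RDONLY/O_WRONLY/O_RDWR) onto the
 * NT desired access mask requested from the server; for example O_RDWR
 * becomes GENERIC_READ | GENERIC_WRITE. The final return is only
 * reached for an invalid O_ACCMODE value and requests the individual
 * FILE_* rights rather than a generic bit.
 */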
static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /*
                 * GENERIC_ALL is too much permission to request; it can
                 * cause unnecessary access-denied errors on create.
                 */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

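/*
 * Translate POSIX open flags into the SMB_O_* flags used by the CIFS
 * POSIX extensions (consumed by cifs_posix_open() below); for example
 * O_CREAT | O_EXCL becomes SMB_O_CREAT | SMB_O_EXCL, and O_DSYNC is
 * conservatively widened to SMB_O_SYNC.
 */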
static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT) {
                posix_flags |= SMB_O_CREAT;
                if (flags & O_EXCL)
                        posix_flags |= SMB_O_EXCL;
        } else if (flags & O_EXCL)
                cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
                         current->comm, current->tgid);

        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

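/*
 * Pick the NT create disposition for the given open flags, following
 * the open flag mapping table documented in cifs_nt_open() below.
 */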
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

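/*
 * Open a file using the CIFS POSIX extensions. On success the new
 * netfid and oplock are returned through pnetfid and poplock, and if
 * the caller supplied pinode, the inode is created or updated from the
 * FILE_UNIX_BASIC_INFO returned with the create response.
 */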
int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cifs_dbg(FYI, "posix open %s\n", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

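/*
 * Open a file the traditional (non-POSIX) way: convert the open flags
 * into an NT desired access and create disposition, issue the open
 * through the dialect's ops->open, and refresh the inode from the
 * metadata the server returned.
 */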
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifs_open_parms oparms;

        if (!server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is no POSIX flag that maps directly to the
 *      disposition FILE_SUPERSEDE (i.e. create the file whether or not
 *      it already exists). O_CREAT | O_TRUNC is similar, but it
 *      truncates an existing file rather than replacing it with a new
 *      file as FILE_SUPERSEDE does (FILE_SUPERSEDE also applies the
 *      attributes / metadata passed in on the open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag, and
 *      the read/write flags match reasonably. O_LARGEFILE is
 *      irrelevant because largefile support is always used by this
 *      client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *      O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        /* O_SYNC also has bit for O_DSYNC so following check picks up either */
        if (f_flags & O_SYNC)
                create_options |= CREATE_WRITE_THROUGH;

        if (f_flags & O_DIRECT)
                create_options |= CREATE_NO_BUFFER;

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = fid;
        oparms.reconnect = false;

        rc = server->ops->open(xid, &oparms, oplock, buf);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, fid);

out:
        kfree(buf);
        return rc;
}

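/*
 * Return true if any fid open on this inode currently holds byte-range
 * locks (used to decide whether a read oplock must be refused).
 */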
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
        struct cifs_fid_locks *cur;
        bool has_locks = false;

        down_read(&cinode->lock_sem);
        list_for_each_entry(cur, &cinode->llist, llist) {
                if (!list_empty(&cur->locks)) {
                        has_locks = true;
                        break;
                }
        }
        up_read(&cinode->lock_sem);
        return has_locks;
}

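/*
 * Build the cifsFileInfo that becomes file->private_data for a newly
 * opened file: take references on the dentry and tcon_link, add the
 * entry to the per-inode and per-tcon open file lists, and hand the
 * acquired fid and oplock level to the dialect's set_fid operation.
 */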
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file_dentry(file);
        struct inode *inode = d_inode(dentry);
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;
        struct cifs_tcon *tcon = tlink_tcon(tlink);
        struct TCP_Server_Info *server = tcon->ses->server;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
        down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);
        spin_lock_init(&cfile->file_info_lock);

        cifs_sb_active(inode->i_sb);

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        spin_lock(&tcon->open_file_lock);
        if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
                oplock = fid->pending_open->oplock;
        list_del(&fid->pending_open->olist);

        fid->purge_cache = false;
        server->ops->set_fid(cfile, fid, oplock);

        list_add(&cfile->tlist, &tcon->openFileList);
        atomic_inc(&tcon->num_local_opens);

        /* if it is a readable file instance, put it first in the list */
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&tcon->open_file_lock);

        if (fid->purge_cache)
                cifs_zap_mapping(inode);

        file->private_data = cfile;
        return cfile;
}

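/*
 * Take an extra reference on an existing open file instance, under
 * file_info_lock. The matching release is cifsFileInfo_put() below.
 */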
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file->file_info_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file->file_info_lock);
        return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = d_inode(cifs_file->dentry);
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;
        bool oplock_break_cancelled;

        spin_lock(&tcon->open_file_lock);

        spin_lock(&cifs_file->file_info_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file->file_info_lock);
                spin_unlock(&tcon->open_file_lock);
                return;
        }
        spin_unlock(&cifs_file->file_info_lock);

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /* store open in pending opens to make sure we don't miss lease break */
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);
        atomic_dec(&tcon->num_local_opens);

        if (list_empty(&cifsi->openFileList)) {
                cifs_dbg(FYI, "closing last open instance for inode %p\n",
                         d_inode(cifs_file->dentry));
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
                cifs_set_oplock_level(cifsi, 0);
        }

        spin_unlock(&tcon->open_file_lock);

        oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;

                xid = get_xid();
                if (server->ops->close)
                        server->ops->close(xid, tcon, &cifs_file->fid);
                _free_xid(xid);
        }

        if (oplock_break_cancelled)
                cifs_done_oplock_break(cifsi);

        cifs_del_pending_open(&open);

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        up_write(&cifsi->lock_sem);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        cifs_sb_deactive(sb);
        kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;

        full_path = build_path_from_dentry(file_dentry(file));
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);

        if (file->f_flags & O_DIRECT &&
            cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
                        file->f_op = &cifs_file_direct_nobrl_ops;
                else
                        file->f_op = &cifs_file_direct_ops;
        }

        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix open succeeded\n");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
                                         tcon->ses->serverName,
                                         tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fallthrough to retry open the old way on network i/o
                 * or DFS errors.
                 */
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        cifs_add_pending_open(&fid, tlink, &open);

        if (!posix_open_ok) {
                if (server->ops->get_lease_key)
                        server->ops->get_lease_key(inode, &fid);

                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc) {
                        cifs_del_pending_open(&open);
                        goto out;
                }
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = INVALID_UID, /* no change */
                        .gid    = INVALID_GID, /* no change */
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
        if (cinode->can_cache_brlcks) {
                /* can cache locks - no need to relock */
                up_read(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        up_read(&cinode->lock_sem);
        return rc;
}

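/*
 * Reopen a file handle that was invalidated, typically after a
 * reconnect to the server. When can_flush is set, dirty pages are
 * written back and the inode metadata refreshed from the server before
 * the handle is reused; cached byte-range locks are reacquired through
 * cifs_relock_file() once the open succeeds with oparms.reconnect set.
 */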
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        char *full_path = NULL;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_open_parms oparms;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = d_inode(cfile->dentry);
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        /*
         * Can not grab rename sem here because various ops, including those
         * that already have the rename sem can end up causing writepage to get
         * called and if the server was down that means we end up here, and we
         * can never tell if the caller already has the rename_sem.
         */
        full_path = build_path_from_dentry(cfile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
                 inode, cfile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &cfile->fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix reopen succeeded\n");
                        oparms.reconnect = true;
                        goto reopen_success;
                }
                /*
                 * Fall through to retry the open the old way on errors;
                 * especially in the reconnect path it is important to
                 * retry hard.
                 */
        }

        desired_access = cifs_convert_flags(cfile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &cfile->fid);

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = &cfile->fid;
        oparms.reconnect = true;

        /*
         * Can not refresh inode by passing in file_info buf to be returned by
         * ops->open and then calling get_inode_info with returned buf since
         * file might have write-behind data that needs to be flushed and the
         * server version of file size can be stale. If we knew for sure that
         * the inode was not dirty locally we could do this.
         */
        rc = server->ops->open(xid, &oparms, &oplock, NULL);
        if (rc == -ENOENT && oparms.reconnect == false) {
                /* durable handle timeout is expired - open the file again */
                rc = server->ops->open(xid, &oparms, &oplock, NULL);
                /* indicate that we need to relock the file */
                oparms.reconnect = true;
        }

        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
                cifs_dbg(FYI, "oplock: %d\n", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Else we are writing out data to server already and could deadlock if
         * we tried to flush data, and since we do not know if we have data
         * that would invalidate the current end of file on the server we can
         * not go to the server to get the new inode info.
         */

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        server->ops->set_fid(cfile, &cfile->fid, oplock);
        if (oparms.reconnect)
                cifs_relock_file(cfile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

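/*
 * ->release() for cifs files: drop the reference taken at open time.
 * The handle is only closed on the server once the reference count in
 * cifsFileInfo_put() drops to zero.
 */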
int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

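/*
 * Reopen any invalidated handles on this tree connection that were
 * opened as persistent. Candidates are first collected onto a private
 * list under open_file_lock, because cifs_reopen_file() sleeps and so
 * cannot run while the spinlock is held.
 */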
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
        struct cifsFileInfo *open_file;
        struct list_head *tmp;
        struct list_head *tmp1;
        struct list_head tmp_list;

        if (!tcon->use_persistent || !tcon->need_reopen_files)
                return;

        tcon->need_reopen_files = false;

        cifs_dbg(FYI, "Reopen persistent handles\n");
        INIT_LIST_HEAD(&tmp_list);

        /* list all files open on tree connection, reopen persistent handles */
        spin_lock(&tcon->open_file_lock);
        list_for_each(tmp, &tcon->openFileList) {
                open_file = list_entry(tmp, struct cifsFileInfo, tlist);
                if (!open_file->invalidHandle)
                        continue;
                cifsFileInfo_get(open_file);
                list_add_tail(&open_file->rlist, &tmp_list);
        }
        spin_unlock(&tcon->open_file_lock);

        list_for_each_safe(tmp, tmp1, &tmp_list) {
                open_file = list_entry(tmp, struct cifsFileInfo, rlist);
                if (cifs_reopen_file(open_file, false /* do not flush */))
                        tcon->need_reopen_files = true;
                list_del_init(&open_file->rlist);
                cifsFileInfo_put(open_file);
        }
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cfile->file_info_lock);
        if (server->ops->dir_needs_close(cfile)) {
                cfile->invalidHandle = true;
                spin_unlock(&cfile->file_info_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cfile->file_info_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

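/*
 * Allocate and initialize a cifsLockInfo describing a byte-range lock
 * owned by the current thread group; returns NULL on allocation
 * failure.
 */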
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        lock->flags = flags;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

#define CIFS_LOCK_OP    0
#define CIFS_READ_OP    1
#define CIFS_WRITE_OP   2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, __u16 flags,
                            struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, int rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
                    server->ops->compare_fids(cfile, cur_cfile)) {
                        /* shared lock prevents write op through the same fid */
                        if (!(li->type & server->vals->shared_lock_type) ||
                            rw_check != CIFS_WRITE_OP)
                                continue;
                }
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                if (rw_check == CIFS_LOCK_OP &&
                    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
                    server->ops->compare_fids(cfile, cur_cfile))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, __u16 flags,
                        struct cifsLockInfo **conf_lock, int rw_check)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 flags, cfile, conf_lock,
                                                 rw_check);
                if (rc)
                        break;
        }

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we
 * can cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to send the request to the server, or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        down_read(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        flock->fl_flags, &conf_lock,
                                        CIFS_LOCK_OP);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        up_read(&cinode->lock_sem);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if no locks prevent us but we need to send a request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        down_write(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, lock->flags, &conf_lock,
                                        CIFS_LOCK_OP);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                up_write(&cinode->lock_sem);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }

        up_write(&cinode->lock_sem);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to send the request to the server, or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        down_read(&cinode->lock_sem);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        up_read(&cinode->lock_sem);
        return rc;
}

Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001081/*
1082 * Set the byte-range lock (posix style). Returns:
1083 * 1) 0, if we set the lock and don't need to request to the server;
1084 * 2) 1, if we need to request to the server;
1085 * 3) <0, if the error occurs while setting the lock.
1086 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001087static int
1088cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1089{
Al Viro496ad9a2013-01-23 17:07:38 -05001090 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001091 int rc = 1;
1092
1093 if ((flock->fl_flags & FL_POSIX) == 0)
1094 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001095
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001096try_again:
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001097 down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001098 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001099 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001100 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001101 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001102
1103 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001104 up_write(&cinode->lock_sem);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001105 if (rc == FILE_LOCK_DEFERRED) {
NeilBrownada5c1d2018-11-30 10:04:08 +11001106 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001107 if (!rc)
1108 goto try_again;
NeilBrowncb03f942018-11-30 10:04:08 +11001109 locks_delete_block(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001110 }
Steve French9ebb3892012-04-01 13:52:54 -05001111 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001112}
1113
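/*
 * Push all cached byte-range locks on this open file to the server.
 * Locks are packed into LOCKING_ANDX_RANGE arrays of at most max_num
 * entries per request and sent in two passes: exclusive locks first,
 * then shared ones. An error from any batch is remembered and returned
 * after all locks have been attempted.
 */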
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001114int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001115cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001116{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001117 unsigned int xid;
1118 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001119 struct cifsLockInfo *li, *tmp;
1120 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001121 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001122 LOCKING_ANDX_RANGE *buf, *cur;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001123 static const int types[] = {
1124 LOCKING_ANDX_LARGE_FILES,
1125 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1126 };
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001127 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001128
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001129 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001130 tcon = tlink_tcon(cfile->tlink);
1131
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001132 /*
 1133 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
 1134 * and check it for zero before using it.
1135 */
1136 max_buf = tcon->ses->server->maxBuf;
1137 if (!max_buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001138 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001139 return -EINVAL;
1140 }
1141
1142 max_num = (max_buf - sizeof(struct smb_hdr)) /
1143 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001144 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001145 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001146 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001147 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001148 }
1149
1150 for (i = 0; i < 2; i++) {
1151 cur = buf;
1152 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001153 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001154 if (li->type != types[i])
1155 continue;
1156 cur->Pid = cpu_to_le16(li->pid);
1157 cur->LengthLow = cpu_to_le32((u32)li->length);
1158 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1159 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1160 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1161 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001162 stored_rc = cifs_lockv(xid, tcon,
1163 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001164 (__u8)li->type, 0, num,
1165 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001166 if (stored_rc)
1167 rc = stored_rc;
1168 cur = buf;
1169 num = 0;
1170 } else
1171 cur++;
1172 }
1173
1174 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001175 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001176 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001177 if (stored_rc)
1178 rc = stored_rc;
1179 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001180 }
1181
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001182 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001183 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001184 return rc;
1185}
1186
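/*
 * Obfuscate the lock owner before it goes out as an on-the-wire PID:
 * XORing a random secret with a hash of the pointer avoids leaking raw
 * kernel addresses to the server.
 */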
Jeff Layton3d224622016-05-24 06:27:44 -04001187static __u32
1188hash_lockowner(fl_owner_t owner)
1189{
1190 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1191}
1192
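/*
 * Snapshot of one posix lock, copied out under the flc_lock spinlock so
 * that the corresponding server requests can be sent later without
 * holding that lock.
 */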
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001193struct lock_to_push {
1194 struct list_head llist;
1195 __u64 offset;
1196 __u64 length;
1197 __u32 pid;
1198 __u16 netfid;
1199 __u8 type;
1200};
1201
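/*
 * Push the inode's cached posix locks to the server. lock_to_push
 * entries cannot be allocated under the flc_lock spinlock, so we first
 * count the locks and preallocate the list, then copy the lock details
 * while holding the spinlock, and finally issue one CIFSSMBPosixLock
 * call per lock with no spinlock held.
 */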
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001202static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001203cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001204{
David Howells2b0143b2015-03-17 22:25:59 +00001205 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001206 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001207 struct file_lock *flock;
1208 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001209 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001210 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001211 struct list_head locks_to_send, *el;
1212 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001213 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001214
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001215 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001216
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001217 if (!flctx)
1218 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001219
Jeff Laytone084c1b2015-02-16 14:32:03 -05001220 spin_lock(&flctx->flc_lock);
1221 list_for_each(el, &flctx->flc_posix) {
1222 count++;
1223 }
1224 spin_unlock(&flctx->flc_lock);
1225
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001226 INIT_LIST_HEAD(&locks_to_send);
1227
1228 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001229 * Allocating count locks is enough because no FL_POSIX locks can be
 1230 * added to the list while we are holding cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001231 * protects the locking operations on this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001232 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001233 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001234 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1235 if (!lck) {
1236 rc = -ENOMEM;
1237 goto err_out;
1238 }
1239 list_add_tail(&lck->llist, &locks_to_send);
1240 }
1241
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001242 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001243 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001244 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001245 if (el == &locks_to_send) {
1246 /*
1247 * The list ended. We don't have enough allocated
1248 * structures - something is really wrong.
1249 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001250 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001251 break;
1252 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001253 length = 1 + flock->fl_end - flock->fl_start;
1254 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1255 type = CIFS_RDLCK;
1256 else
1257 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001258 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001259 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001260 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001261 lck->length = length;
1262 lck->type = type;
1263 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001264 }
Jeff Layton6109c852015-01-16 15:05:57 -05001265 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001266
1267 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001268 int stored_rc;
1269
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001270 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001271 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001272 lck->type, 0);
1273 if (stored_rc)
1274 rc = stored_rc;
1275 list_del(&lck->llist);
1276 kfree(lck);
1277 }
1278
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001279out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001280 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001281 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001282err_out:
1283 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1284 list_del(&lck->llist);
1285 kfree(lck);
1286 }
1287 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001288}
1289
1290static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001291cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001292{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001293 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001294 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001295 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001296 int rc = 0;
1297
1298 /* we are going to update can_cache_brlcks here - need a write access */
1299 down_write(&cinode->lock_sem);
1300 if (!cinode->can_cache_brlcks) {
1301 up_write(&cinode->lock_sem);
1302 return rc;
1303 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001304
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001305 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001306 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1307 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001308 rc = cifs_push_posix_locks(cfile);
1309 else
1310 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001311
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001312 cinode->can_cache_brlcks = false;
1313 up_write(&cinode->lock_sem);
1314 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001315}
1316
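/*
 * Decode a VFS file_lock into the server's lock type bits and work out
 * whether this is a lock or an unlock request and whether we are allowed
 * to block waiting for it.
 */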
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001317static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001318cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001319 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001321 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001322 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001323 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001324 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001325 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001326 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001327 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001329 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001330 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001331 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001332 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001333 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001334 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001335 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001336 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001338 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001339 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001340 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001341 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001342 *lock = 1;
1343 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001344 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001345 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001346 *unlock = 1;
1347 /* Check if unlock includes more than one lock range */
1348 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001349 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001350 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001351 *lock = 1;
1352 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001353 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001354 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001355 *lock = 1;
1356 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001357 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001358 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001359 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001361 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001362}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363
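/*
 * Handle F_GETLK. Posix locks are tested locally first and only sent to
 * the server when required. For mandatory locks we probe the server by
 * setting and immediately unlocking the range, first with the requested
 * type and, if that conflicts, again as a shared lock, then report the
 * result in flock->fl_type.
 */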
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001364static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001365cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001366 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001367{
1368 int rc = 0;
1369 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001370 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1371 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001372 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001373 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001375 if (posix_lck) {
1376 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001377
1378 rc = cifs_posix_lock_test(file, flock);
1379 if (!rc)
1380 return rc;
1381
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001382 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001383 posix_lock_type = CIFS_RDLCK;
1384 else
1385 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001386 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1387 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001388 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001389 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 return rc;
1391 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001392
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001393 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001394 if (!rc)
1395 return rc;
1396
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001397 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001398 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1399 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001400 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001401 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1402 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001403 flock->fl_type = F_UNLCK;
1404 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001405 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1406 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001407 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001408 }
1409
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001410 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001411 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001412 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001413 }
1414
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001415 type &= ~server->vals->exclusive_lock_type;
1416
1417 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1418 type | server->vals->shared_lock_type,
1419 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001420 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001421 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1422 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001423 flock->fl_type = F_RDLCK;
1424 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001425 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1426 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001427 } else
1428 flock->fl_type = F_WRLCK;
1429
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001430 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001431}
1432
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001433void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001434cifs_move_llist(struct list_head *source, struct list_head *dest)
1435{
1436 struct list_head *li, *tmp;
1437 list_for_each_safe(li, tmp, source)
1438 list_move(li, dest);
1439}
1440
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001441void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001442cifs_free_llist(struct list_head *llist)
1443{
1444 struct cifsLockInfo *li, *tmp;
1445 list_for_each_entry_safe(li, tmp, llist, llist) {
1446 cifs_del_lock_waiters(li);
1447 list_del(&li->llist);
1448 kfree(li);
1449 }
1450}
1451
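/*
 * Remove the cached locks of this file that fall inside the unlock range
 * and, unless brlocks are still cacheable, send the matching unlock
 * requests to the server in max_num-sized batches. Unlocked entries are
 * parked on a temporary list so they can be put back on the file's list
 * if a server request fails.
 */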
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001452int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001453cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1454 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001455{
1456 int rc = 0, stored_rc;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001457 static const int types[] = {
1458 LOCKING_ANDX_LARGE_FILES,
1459 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1460 };
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001461 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001462 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001463 LOCKING_ANDX_RANGE *buf, *cur;
1464 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001465 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001466 struct cifsLockInfo *li, *tmp;
1467 __u64 length = 1 + flock->fl_end - flock->fl_start;
1468 struct list_head tmp_llist;
1469
1470 INIT_LIST_HEAD(&tmp_llist);
1471
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001472 /*
 1473 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
 1474 * and check it for zero before using it.
1475 */
1476 max_buf = tcon->ses->server->maxBuf;
1477 if (!max_buf)
1478 return -EINVAL;
1479
1480 max_num = (max_buf - sizeof(struct smb_hdr)) /
1481 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001482 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001483 if (!buf)
1484 return -ENOMEM;
1485
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001486 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001487 for (i = 0; i < 2; i++) {
1488 cur = buf;
1489 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001490 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001491 if (flock->fl_start > li->offset ||
1492 (flock->fl_start + length) <
1493 (li->offset + li->length))
1494 continue;
1495 if (current->tgid != li->pid)
1496 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001497 if (types[i] != li->type)
1498 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001499 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001500 /*
1501 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001502 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001503 */
1504 list_del(&li->llist);
1505 cifs_del_lock_waiters(li);
1506 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001507 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001508 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001509 cur->Pid = cpu_to_le16(li->pid);
1510 cur->LengthLow = cpu_to_le32((u32)li->length);
1511 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1512 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1513 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1514 /*
1515 * We need to save a lock here to let us add it again to
1516 * the file's list if the unlock range request fails on
1517 * the server.
1518 */
1519 list_move(&li->llist, &tmp_llist);
1520 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001521 stored_rc = cifs_lockv(xid, tcon,
1522 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001523 li->type, num, 0, buf);
1524 if (stored_rc) {
1525 /*
1526 * We failed on the unlock range
1527 * request - add all locks from the tmp
1528 * list to the head of the file's list.
1529 */
1530 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001531 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001532 rc = stored_rc;
1533 } else
1534 /*
 1535 * The unlock range request succeeded -
1536 * free the tmp list.
1537 */
1538 cifs_free_llist(&tmp_llist);
1539 cur = buf;
1540 num = 0;
1541 } else
1542 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001543 }
1544 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001545 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001546 types[i], num, 0, buf);
1547 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001548 cifs_move_llist(&tmp_llist,
1549 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001550 rc = stored_rc;
1551 } else
1552 cifs_free_llist(&tmp_llist);
1553 }
1554 }
1555
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001556 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001557 kfree(buf);
1558 return rc;
1559}
1560
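/*
 * Handle F_SETLK/F_SETLKW. Posix locks are set in the local VFS while
 * they can still be cached, otherwise sent via CIFSSMBPosixLock. For a
 * mandatory lock we first check for local conflicts, zap a cached read
 * lease if one is held without write caching, send the lock to the
 * server and only then add it to the file's lock list; unlocks go
 * through the mand_unlock_range operation.
 */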
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001561static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001562cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001563 bool wait_flag, bool posix_lck, int lock, int unlock,
1564 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001565{
1566 int rc = 0;
1567 __u64 length = 1 + flock->fl_end - flock->fl_start;
1568 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1569 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001570 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001571 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001572
1573 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001574 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001575
1576 rc = cifs_posix_lock_set(file, flock);
1577 if (!rc || rc < 0)
1578 return rc;
1579
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001580 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001581 posix_lock_type = CIFS_RDLCK;
1582 else
1583 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001584
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001585 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001586 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001587
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001588 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001589 hash_lockowner(flock->fl_owner),
1590 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001591 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001592 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001593 }
1594
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001595 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001596 struct cifsLockInfo *lock;
1597
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001598 lock = cifs_lock_init(flock->fl_start, length, type,
1599 flock->fl_flags);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001600 if (!lock)
1601 return -ENOMEM;
1602
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001603 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001604 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001605 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001606 return rc;
1607 }
1608 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001609 goto out;
1610
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001611 /*
 1612 * A Windows 7 server can delay breaking a lease from read to None
 1613 * if we set a byte-range lock on a file - break it explicitly
 1614 * before sending the lock to the server to be sure the next
 1615 * read won't conflict with non-overlapping locks due to
 1616 * page cache reads.
1617 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001618 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1619 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001620 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001621 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1622 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001623 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001624 }
1625
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001626 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1627 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001628 if (rc) {
1629 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001630 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001631 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001632
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001633 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001634 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001635 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001636
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001637out:
Chengyu Song00b8c952015-03-24 20:18:49 -04001638 if (flock->fl_flags & FL_POSIX && !rc)
Benjamin Coddington4f656362015-10-22 13:38:14 -04001639 rc = locks_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001640 return rc;
1641}
1642
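/*
 * Entry point for the VFS ->lock operation. Decides whether posix (unix
 * extensions) or mandatory locking semantics apply for this mount and
 * dispatches to cifs_getlk or cifs_setlk.
 */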
1643int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1644{
1645 int rc, xid;
1646 int lock = 0, unlock = 0;
1647 bool wait_flag = false;
1648 bool posix_lck = false;
1649 struct cifs_sb_info *cifs_sb;
1650 struct cifs_tcon *tcon;
1651 struct cifsInodeInfo *cinode;
1652 struct cifsFileInfo *cfile;
1653 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001654 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001655
1656 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001657 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001658
Joe Perchesf96637b2013-05-04 22:12:25 -05001659 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1660 cmd, flock->fl_flags, flock->fl_type,
1661 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001662
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001663 cfile = (struct cifsFileInfo *)file->private_data;
1664 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001665
1666 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1667 tcon->ses->server);
Al Viro7119e222014-10-22 00:25:12 -04001668 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001669 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001670 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001671
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001672 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001673 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1674 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1675 posix_lck = true;
1676 /*
1677 * BB add code here to normalize offset and length to account for
 1678 * negative length, which we cannot accept over the wire.
1679 */
1680 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001681 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001682 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001683 return rc;
1684 }
1685
1686 if (!lock && !unlock) {
1687 /*
 1688 * if this is neither a lock nor an unlock request then there is
 1689 * nothing to do since we do not know what it is
1690 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001691 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001692 return -EOPNOTSUPP;
1693 }
1694
1695 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1696 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001697 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 return rc;
1699}
1700
Jeff Layton597b0272012-03-23 14:40:56 -04001701/*
1702 * update the file size (if needed) after a write. Should be called with
1703 * the inode->i_lock held
1704 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001705void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001706cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1707 unsigned int bytes_written)
1708{
1709 loff_t end_of_write = offset + bytes_written;
1710
1711 if (end_of_write > cifsi->server_eof)
1712 cifsi->server_eof = end_of_write;
1713}
1714
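/*
 * Synchronously write write_size bytes at *offset through the given open
 * handle. The buffer is sent in chunks capped by the server's retry
 * size; an invalidated handle is reopened and -EAGAIN results are
 * retried. On success the cached server EOF and i_size are advanced and
 * the total number of bytes written is returned.
 */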
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001715static ssize_t
1716cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1717 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718{
1719 int rc = 0;
1720 unsigned int bytes_written = 0;
1721 unsigned int total_written;
1722 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001723 struct cifs_tcon *tcon;
1724 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001725 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001726 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001727 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001728 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
Jeff Layton7da4b492010-10-15 15:34:00 -04001730 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
Al Viro35c265e2014-08-19 20:25:34 -04001732 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1733 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001735 tcon = tlink_tcon(open_file->tlink);
1736 server = tcon->ses->server;
1737
1738 if (!server->ops->sync_write)
1739 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001740
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001741 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 for (total_written = 0; write_size > total_written;
1744 total_written += bytes_written) {
1745 rc = -EAGAIN;
1746 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001747 struct kvec iov[2];
1748 unsigned int len;
1749
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 /* we could deadlock if we called
1752 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001753 reopen_file not to flush data to the
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001755 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 if (rc != 0)
1757 break;
1758 }
Steve French3e844692005-10-03 13:37:24 -07001759
David Howells2b0143b2015-03-17 22:25:59 +00001760 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001761 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001762 /* iov[0] is reserved for smb header */
1763 iov[1].iov_base = (char *)write_data + total_written;
1764 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001765 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001766 io_parms.tcon = tcon;
1767 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001768 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001769 rc = server->ops->sync_write(xid, &open_file->fid,
1770 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 }
1772 if (rc || (bytes_written == 0)) {
1773 if (total_written)
1774 break;
1775 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001776 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 return rc;
1778 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001779 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001780 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001781 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001782 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001783 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001784 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 }
1786
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001787 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
Jeff Layton7da4b492010-10-15 15:34:00 -04001789 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001790 spin_lock(&d_inode(dentry)->i_lock);
1791 if (*offset > d_inode(dentry)->i_size)
1792 i_size_write(d_inode(dentry), *offset);
1793 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 }
David Howells2b0143b2015-03-17 22:25:59 +00001795 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001796 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 return total_written;
1798}
1799
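/*
 * Return a referenced open handle for this inode that permits reading,
 * or NULL if there is none. On multiuser mounts only handles owned by
 * the caller's fsuid are considered when fsuid_only is set, and handles
 * pending reopen are skipped.
 */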
Jeff Layton6508d902010-09-29 19:51:11 -04001800struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1801 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001802{
1803 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001804 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001805 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Layton6508d902010-09-29 19:51:11 -04001806
1807 /* only filter by fsuid on multiuser mounts */
1808 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1809 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001810
Steve French3afca262016-09-22 18:58:16 -05001811 spin_lock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001812 /* we could simply get the first_list_entry since write-only entries
1813 are always at the end of the list but since the first entry might
1814 have a close pending, we go through the whole list */
1815 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001816 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001817 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001818 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001819 if (!open_file->invalidHandle) {
1820 /* found a good file */
1821 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001822 cifsFileInfo_get(open_file);
1823 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001824 return open_file;
1825 } /* else might as well continue, and look for
1826 another, or simply have the caller reopen it
1827 again rather than trying to fix this handle */
1828 } else /* write only file */
1829 break; /* write only files are last so must be done */
1830 }
Steve French3afca262016-09-22 18:58:16 -05001831 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001832 return NULL;
1833}
Steve French630f3f0c2007-10-25 21:17:17 +00001834
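/*
 * Like find_readable_file, but for write access. Handles opened by the
 * current task are preferred; failing that any owner is accepted, and as
 * a last resort an invalidated handle is reopened (bounded by
 * MAX_REOPEN_ATT attempts).
 */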
Jeff Layton6508d902010-09-29 19:51:11 -04001835struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1836 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001837{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001838 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001839 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001840 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001841 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001842 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001843 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001844
Steve French60808232006-04-22 15:53:05 +00001845 /* Having a null inode here (because mapping->host was set to zero by
 1846 the VFS or MM) should not happen but we had reports of an oops (due to
 1847 it being zero) during stress test cases so we need to check for it */
1848
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001849 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001850 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001851 dump_stack();
1852 return NULL;
1853 }
1854
Jeff Laytond3892292010-11-02 16:22:50 -04001855 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001856 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001857
Jeff Layton6508d902010-09-29 19:51:11 -04001858 /* only filter by fsuid on multiuser mounts */
1859 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1860 fsuid_only = false;
1861
Steve French3afca262016-09-22 18:58:16 -05001862 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001863refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001864 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001865 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001866 return NULL;
1867 }
Steve French6148a742005-10-05 12:23:19 -07001868 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001869 if (!any_available && open_file->pid != current->tgid)
1870 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001871 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001872 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001873 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001874 if (!open_file->invalidHandle) {
1875 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001876 cifsFileInfo_get(open_file);
1877 spin_unlock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001878 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001879 } else {
1880 if (!inv_file)
1881 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001882 }
Steve French6148a742005-10-05 12:23:19 -07001883 }
1884 }
Jeff Layton2846d382008-09-22 21:33:33 -04001885 /* couldn't find a usable FH with the same pid, try any available */
1886 if (!any_available) {
1887 any_available = true;
1888 goto refind_writable;
1889 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001890
1891 if (inv_file) {
1892 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001893 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001894 }
1895
Steve French3afca262016-09-22 18:58:16 -05001896 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001897
1898 if (inv_file) {
1899 rc = cifs_reopen_file(inv_file, false);
1900 if (!rc)
1901 return inv_file;
1902 else {
Steve French3afca262016-09-22 18:58:16 -05001903 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001904 list_move_tail(&inv_file->flist,
1905 &cifs_inode->openFileList);
Steve French3afca262016-09-22 18:58:16 -05001906 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001907 cifsFileInfo_put(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001908 ++refind;
David Disseldorpe1e9bda2015-03-13 14:20:29 +01001909 inv_file = NULL;
Steve French3afca262016-09-22 18:58:16 -05001910 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001911 goto refind_writable;
1912 }
1913 }
1914
Steve French6148a742005-10-05 12:23:19 -07001915 return NULL;
1916}
1917
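/*
 * Write the byte range [from, to) of a page back to the server using any
 * writable handle for the inode, trimming the range so the file is never
 * extended. Returns 0 on success and a negative errno on failure (-EIO
 * when no writable handle can be found).
 */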
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1919{
1920 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001921 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 char *write_data;
1923 int rc = -EFAULT;
1924 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001926 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
1928 if (!mapping || !mapping->host)
1929 return -EFAULT;
1930
1931 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
1933 offset += (loff_t)from;
1934 write_data = kmap(page);
1935 write_data += from;
1936
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001937 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 kunmap(page);
1939 return -EIO;
1940 }
1941
1942 /* racing with truncate? */
1943 if (offset > mapping->host->i_size) {
1944 kunmap(page);
1945 return 0; /* don't care */
1946 }
1947
1948 /* check to make sure that we are not extending the file */
1949 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001950 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951
Jeff Layton6508d902010-09-29 19:51:11 -04001952 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001953 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001954 bytes_written = cifs_write(open_file, open_file->pid,
1955 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001956 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07001958 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001959 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001960 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001961 else if (bytes_written < 0)
1962 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001963 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05001964 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 rc = -EIO;
1966 }
1967
1968 kunmap(page);
1969 return rc;
1970}
1971
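/*
 * Allocate a cifs_writedata for up to tofind pages and fill its page
 * array with the next run of dirty-tagged pages from the mapping,
 * advancing *index as it goes.
 */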
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001972static struct cifs_writedata *
1973wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1974 pgoff_t end, pgoff_t *index,
1975 unsigned int *found_pages)
1976{
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001977 struct cifs_writedata *wdata;
1978
1979 wdata = cifs_writedata_alloc((unsigned int)tofind,
1980 cifs_writev_complete);
1981 if (!wdata)
1982 return NULL;
1983
Jan Kara9c19a9c2017-11-15 17:35:26 -08001984 *found_pages = find_get_pages_range_tag(mapping, index, end,
1985 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001986 return wdata;
1987}
1988
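/*
 * Lock and vet the pages collected by wdata_alloc_and_fillpages: stop at
 * the first page that was truncated or reused, lies beyond the range
 * end, or is not contiguous with its predecessor; mark the survivors for
 * writeback and release the rest. Returns how many pages are ready to be
 * written.
 */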
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04001989static unsigned int
1990wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
1991 struct address_space *mapping,
1992 struct writeback_control *wbc,
1993 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
1994{
1995 unsigned int nr_pages = 0, i;
1996 struct page *page;
1997
1998 for (i = 0; i < found_pages; i++) {
1999 page = wdata->pages[i];
2000 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002001 * At this point we hold neither the i_pages lock nor the
2002 * page lock: the page may be truncated or invalidated
2003 * (changing page->mapping to NULL), or even swizzled
2004 * back from swapper_space to tmpfs file mapping
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002005 */
2006
2007 if (nr_pages == 0)
2008 lock_page(page);
2009 else if (!trylock_page(page))
2010 break;
2011
2012 if (unlikely(page->mapping != mapping)) {
2013 unlock_page(page);
2014 break;
2015 }
2016
2017 if (!wbc->range_cyclic && page->index > end) {
2018 *done = true;
2019 unlock_page(page);
2020 break;
2021 }
2022
2023 if (*next && (page->index != *next)) {
2024 /* Not next consecutive page */
2025 unlock_page(page);
2026 break;
2027 }
2028
2029 if (wbc->sync_mode != WB_SYNC_NONE)
2030 wait_on_page_writeback(page);
2031
2032 if (PageWriteback(page) ||
2033 !clear_page_dirty_for_io(page)) {
2034 unlock_page(page);
2035 break;
2036 }
2037
2038 /*
2039 * This actually clears the dirty bit in the radix tree.
2040 * See cifs_writepage() for more commentary.
2041 */
2042 set_page_writeback(page);
2043 if (page_offset(page) >= i_size_read(mapping->host)) {
2044 *done = true;
2045 unlock_page(page);
2046 end_page_writeback(page);
2047 break;
2048 }
2049
2050 wdata->pages[i] = page;
2051 *next = page->index + 1;
2052 ++nr_pages;
2053 }
2054
2055 /* reset index to refind any pages skipped */
2056 if (nr_pages == 0)
2057 *index = wdata->pages[0]->index + 1;
2058
2059 /* put any pages we aren't going to use */
2060 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002061 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002062 wdata->pages[i] = NULL;
2063 }
2064
2065 return nr_pages;
2066}
2067
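/*
 * Fill in the remaining cifs_writedata fields (offset, page and tail
 * sizes, target handle) and pass the request to the server's
 * async_writev operation; completion is handled by cifs_writev_complete.
 * All pages are unlocked before returning.
 */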
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002068static int
2069wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2070 struct address_space *mapping, struct writeback_control *wbc)
2071{
2072 int rc = 0;
2073 struct TCP_Server_Info *server;
2074 unsigned int i;
2075
2076 wdata->sync_mode = wbc->sync_mode;
2077 wdata->nr_pages = nr_pages;
2078 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002079 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002080 wdata->tailsz = min(i_size_read(mapping->host) -
2081 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002082 (loff_t)PAGE_SIZE);
2083 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002084
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002085 if (wdata->cfile != NULL)
2086 cifsFileInfo_put(wdata->cfile);
2087 wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
2088 if (!wdata->cfile) {
2089 cifs_dbg(VFS, "No writable handles for inode\n");
2090 rc = -EBADF;
2091 } else {
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002092 wdata->pid = wdata->cfile->pid;
2093 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2094 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002095 }
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002096
2097 for (i = 0; i < nr_pages; ++i)
2098 unlock_page(wdata->pages[i]);
2099
2100 return rc;
2101}
2102
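/*
 * ->writepages for cifs: repeatedly reserve up to wsize bytes of send
 * credits, gather that many contiguous dirty pages and submit them as a
 * single asynchronous write. Failed batches are redirtied (on -EAGAIN)
 * or marked in error, and WB_SYNC_ALL writeback retries an -EAGAIN batch
 * from its saved index.
 */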
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002104 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002106 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002107 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002108 bool done = false, scanned = false, range_whole = false;
2109 pgoff_t end, index;
2110 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07002111 int rc = 0;
Steve French0cb012d2018-10-11 01:01:02 -05002112 unsigned int xid;
Steve French50c2f752007-07-13 00:33:32 +00002113
Steve French37c0eb42005-10-05 14:50:29 -07002114 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002115 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002116 * one page at a time via cifs_writepage
2117 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002118 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002119 return generic_writepages(mapping, wbc);
2120
Steve French0cb012d2018-10-11 01:01:02 -05002121 xid = get_xid();
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002122 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002123 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002124 end = -1;
2125 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002126 index = wbc->range_start >> PAGE_SHIFT;
2127 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002128 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002129 range_whole = true;
2130 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002131 }
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002132 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
Steve French37c0eb42005-10-05 14:50:29 -07002133retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002134 while (!done && index <= end) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002135 unsigned int i, nr_pages, found_pages, wsize, credits;
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002136 pgoff_t next = 0, tofind, saved_index = index;
Steve French37c0eb42005-10-05 14:50:29 -07002137
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002138 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2139 &wsize, &credits);
2140 if (rc)
2141 break;
Steve French37c0eb42005-10-05 14:50:29 -07002142
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002143 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07002144
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002145 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2146 &found_pages);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002147 if (!wdata) {
2148 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002149 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002150 break;
2151 }
2152
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002153 if (found_pages == 0) {
2154 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002155 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002156 break;
2157 }
2158
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002159 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2160 end, &index, &next, &done);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002161
2162 /* nothing to write? */
2163 if (nr_pages == 0) {
2164 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002165 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002166 continue;
2167 }
2168
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002169 wdata->credits = credits;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002170
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002171 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
Jeff Layton941b8532011-01-11 07:24:01 -05002172
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002173 /* send failure -- clean up the mess */
2174 if (rc != 0) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002175 add_credits_and_wake_if(server, wdata->credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002176 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002177 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002178 redirty_page_for_writepage(wbc,
2179 wdata->pages[i]);
2180 else
2181 SetPageError(wdata->pages[i]);
2182 end_page_writeback(wdata->pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002183 put_page(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002184 }
Jeff Layton941b8532011-01-11 07:24:01 -05002185 if (rc != -EAGAIN)
2186 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002187 }
2188 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002189
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002190 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2191 index = saved_index;
2192 continue;
2193 }
2194
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002195 wbc->nr_to_write -= nr_pages;
2196 if (wbc->nr_to_write <= 0)
2197 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002198
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002199 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002200 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002201
Steve French37c0eb42005-10-05 14:50:29 -07002202 if (!scanned && !done) {
2203 /*
2204 * We hit the last page and there is more work to be done: wrap
2205 * back to the start of the file
2206 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002207 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002208 index = 0;
2209 goto retry;
2210 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002211
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002212 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002213 mapping->writeback_index = index;
2214
Steve French0cb012d2018-10-11 01:01:02 -05002215 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 return rc;
2217}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
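/*
 * Write one locked page. -EAGAIN is retried indefinitely for WB_SYNC_ALL
 * callers; otherwise the page is redirtied and left for a later pass.
 */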
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN) {
		if (wbc->sync_mode == WB_SYNC_ALL)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

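/*
 * ->write_end() for the CIFS address space: called after ->write_begin()
 * once the caller has copied "copied" bytes into the page. If the page
 * never became uptodate, the data is pushed to the server synchronously
 * via cifs_write(); otherwise the page is just marked dirty and left to
 * writeback. Also advances i_size when the write extends the file.
 */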
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/*
		 * This is preferable to calling the partial-page write path
		 * directly, since here the file handle is already known and
		 * can be used for the server write.
		 */
		/* BB check if anything else is missing compared to ppw,
		   such as updating the last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}

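/*
 * fsync for "strict" cache mode: in addition to flushing dirty pages and
 * asking the server to flush, drop the local page cache when we no longer
 * hold a read (level II) oplock/lease, so stale cached data is not reused.
 */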
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}

/*
 * As the file closes, flush all cached write data for this inode and
 * check for write-behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);

	return rc;
}

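/*
 * Allocate the pages that will back an uncached write. On allocation
 * failure every page allocated so far is released and -ENOMEM is
 * returned, so the caller never sees a partially populated array.
 */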
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}

static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}

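/*
 * Retry a wdata whose initial send came back with -EAGAIN. The whole
 * wdata is resent in one piece, so we first wait (up to roughly 3
 * seconds) until the server grants enough credits to cover wdata->bytes,
 * then reopen the file handle if it was invalidated and reissue the
 * async write.
 */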
static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
		  struct cifs_aio_ctx *ctx)
{
	int wait_retry = 0;
	unsigned int wsize, credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(wdata->cfile->tlink)->ses->server;

	/*
	 * Try to resend this wdata, waiting for credits for up to 3 seconds.
	 * Note: we are attempting to resend the whole wdata, not in segments.
	 */
	do {
		rc = server->ops->wait_mtu_credits(
			server, wdata->bytes, &wsize, &credits);

		if (rc)
			break;

		if (wsize < wdata->bytes) {
			add_credits_and_wake_if(server, credits, 0);
			msleep(1000);
			wait_retry++;
		}
	} while (wsize < wdata->bytes && wait_retry < 3);

	if (wsize < wdata->bytes) {
		rc = -EBUSY;
		goto out;
	}

	rc = -EAGAIN;
	while (rc == -EAGAIN) {
		rc = 0;
		if (wdata->cfile->invalidHandle)
			rc = cifs_reopen_file(wdata->cfile, false);
		if (!rc)
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
	}

	if (!rc) {
		list_add_tail(&wdata->list, wdata_list);
		return 0;
	}

	add_credits_and_wake_if(server, wdata->credits, 0);
out:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);

	return rc;
}

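/*
 * Issue the uncached/direct write as a chain of asynchronous requests.
 * Each iteration waits for send credits, carves off up to wsize bytes
 * from @from (either pinning the user pages directly for direct I/O, or
 * copying into freshly allocated pages), and queues an async write. The
 * resulting wdata structures are linked into @wdata_list for the caller
 * to collect.
 */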
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		if (ctx->direct_io) {
			ssize_t result;

			result = iov_iter_get_pages_alloc(
				from, &pagevec, wsize, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					 "direct_writev couldn't get user pages "
					 "(rc=%zd) iter type %d iov_offset %zd "
					 "count %zd\n",
					 result, from->type,
					 from->iov_offset, from->count);
				dump_stack();
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(from, cur_len);

			nr_pages =
				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;

			wdata = cifs_writedata_direct_alloc(pagevec,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			wdata->page_offset = start;
			wdata->tailsz =
				nr_pages > 1 ?
					cur_len - (PAGE_SIZE - start) -
					(nr_pages - 2) * PAGE_SIZE :
					cur_len;
		} else {
			nr_pages = get_numpages(wsize, len, &cur_len);
			wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
			if (rc) {
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			num_pages = nr_pages;
			rc = wdata_fill_from_iovec(
				wdata, from, &cur_len, &num_pages);
			if (rc) {
				for (i = 0; i < nr_pages; i++)
					put_page(wdata->pages[i]);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			/*
			 * Bring nr_pages down to the number of pages we
			 * actually used, and free any pages that we didn't use.
			 */
			for ( ; nr_pages > num_pages; nr_pages--)
				put_page(wdata->pages[nr_pages - 1]);

			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->credits = credits;
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);

		if (!wdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}

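/*
 * Reap completed writes for an aio context, in order of increasing
 * offset. Runs from both the issuing task and the work-queue completion
 * path under ctx->aio_mutex; whoever takes the mutex last finishes the
 * request. -EAGAIN results are resent (rebuilding the requests from
 * ctx->iter for the buffered path, or reusing the pinned pages for
 * direct I/O) before the final return code is decided.
 */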
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	unsigned int i;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				if (ctx->direct_io)
					rc = cifs_resend_wdata(
						wdata, &tmp_list, ctx);
				else {
					iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

					rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);
				}

				list_splice(&tmp_list, &ctx->list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (!ctx->direct_io)
		for (i = 0; i < ctx->npages; i++)
			put_page(ctx->bv[i].bv_page);

	cifs_stats_bytes_written(tcon, ctx->total_len);
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}

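/*
 * Common implementation behind the uncached and direct writev entry
 * points. Sets up a cifs_aio_ctx, fans the write out via
 * cifs_write_from_iter(), and then either waits for completion (sync
 * kiocb) or returns -EIOCBQUEUED and lets the completion path finish
 * the aio request.
 */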
static ssize_t __cifs_writev(
	struct kiocb *iocb, struct iov_iter *from, bool direct)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	size_t len = iov_iter_count(from);
	int rc;

	/*
	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC, so in that
	 * case fall back to the non-direct write function.
	 * This could be improved by getting the pages directly in ITER_KVEC.
	 */
	if (direct && from->type & ITER_KVEC) {
		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
		direct = false;
	}

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	if (direct) {
		ctx->direct_io = true;
		ctx->iter = *from;
		ctx->len = len;
	} else {
		rc = setup_aio_ctx_iter(ctx, from, WRITE);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
	}

	/* grab a lock here: the write response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}

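/*
 * For context: these iter-based entry points are not called directly but
 * are wired into the VFS through struct file_operations. A minimal sketch
 * of how such wiring looks (the real tables live in cifsfs.c and differ
 * per mount type, so the table name here is illustrative only):
 *
 *	const struct file_operations cifs_file_ops_sketch = {
 *		.write_iter	= cifs_user_writev,
 *		.fsync		= cifs_fsync,
 *		.flush		= cifs_flush,
 *	};
 */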
ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, true);
}

ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}

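/*
 * Writeback-cached write path used while we hold an exclusive oplock:
 * take lock_sem shared so that no brlock forbidding the write can be
 * added underneath us, check for lock conflicts, and then go through
 * the generic page-cache write path.
 */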
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}

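/*
 * Top-level write_iter for "strict" cache mounts: writes go through the
 * page cache only while we hold a write (exclusive) oplock/lease;
 * otherwise the data is sent straight to the server via the uncached
 * path and the locally cached pages are invalidated.
 */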
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		    && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because flushing may cause an error with
	 * mandatory locks held on those pages but not on the region from pos
	 * to pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * A Windows 7 server can delay breaking a level2 oplock when
		 * a write request comes in -- break it on the client to
		 * prevent reading stale data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}

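/*
 * Allocation helpers for read responses: the "direct" variant adopts a
 * caller-supplied page array (used for direct I/O, where the user's own
 * pages are pinned), while cifs_readdata_alloc() allocates the array
 * itself for the buffered/uncached path.
 */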
static struct cifs_readdata *
cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (rdata != NULL) {
		rdata->pages = pages;
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct page **pages =
		kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	struct cifs_readdata *ret = NULL;

	if (pages) {
		ret = cifs_readdata_direct_alloc(pages, complete);
		if (!ret)
			kfree(pages);
	}

	return ret;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kvfree(rdata->pages);
	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		unsigned int nr_allocated = i;

		/*
		 * Only release the pages that were actually allocated;
		 * the remaining slots are still NULL from kcalloc() and
		 * must not be passed to put_page().
		 */
		for (i = 0; i < nr_allocated; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
	}
	cifs_readdata_release(refcount);
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written;

		if (unlikely(iov_iter_is_pipe(iter))) {
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}

static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	collect_uncached_read_data(rdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

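/*
 * Fill the pages of a readdata with the bytes that actually arrived.
 * @iter is non-NULL when the data is already buffered in memory (e.g.
 * after transparent decryption) and is copied from there; otherwise the
 * bytes are read straight from the socket. Trailing pages that received
 * no data are released, and rdata->tailsz records the valid length of
 * the last page.
 */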
static int
uncached_fill_pages(struct TCP_Server_Info *server,
		    struct cifs_readdata *rdata, struct iov_iter *iter,
		    unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;
		unsigned int segment_size = rdata->pagesz;

		if (i == 0)
			segment_size -= page_offset;
		else
			page_offset = 0;

		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		n = len;
		if (len >= segment_size)
			/* enough data to fill the page */
			n = segment_size;
		else
			rdata->tailsz = len;
		len -= n;

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
		rdata->got_bytes : result;
}

static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	return uncached_fill_pages(server, rdata, NULL, len);
}

static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata,
			      struct iov_iter *iter)
{
	return uncached_fill_pages(server, rdata, iter, iter->count);
}

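/*
 * Read-side counterpart of cifs_resend_wdata(): wait (up to roughly 3
 * seconds) for enough credits to resend the whole rdata in one piece,
 * then reissue the async read, reopening the file handle first if it
 * has been invalidated.
 */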
static int cifs_resend_rdata(struct cifs_readdata *rdata,
			     struct list_head *rdata_list,
			     struct cifs_aio_ctx *ctx)
{
	int wait_retry = 0;
	unsigned int rsize, credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(rdata->cfile->tlink)->ses->server;

	/*
	 * Try to resend this rdata, waiting for credits for up to 3 seconds.
	 * Note: we are attempting to resend the whole rdata, not in segments.
	 */
	do {
		rc = server->ops->wait_mtu_credits(server, rdata->bytes,
						   &rsize, &credits);

		if (rc)
			break;

		if (rsize < rdata->bytes) {
			add_credits_and_wake_if(server, credits, 0);
			msleep(1000);
			wait_retry++;
		}
	} while (rsize < rdata->bytes && wait_retry < 3);

	/*
	 * If we can't find enough credits to send this rdata, release it
	 * and return failure; this passes whatever I/O amount we have
	 * already finished up to the VFS.
	 */
	if (rsize < rdata->bytes) {
		rc = -EBUSY;
		goto out;
	}

	rc = -EAGAIN;
	while (rc == -EAGAIN) {
		rc = 0;
		if (rdata->cfile->invalidHandle)
			rc = cifs_reopen_file(rdata->cfile, true);
		if (!rc)
			rc = server->ops->async_readv(rdata);
	}

	if (!rc) {
		/* Add to aio pending list */
		list_add_tail(&rdata->list, rdata_list);
		return 0;
	}

	add_credits_and_wake_if(server, rdata->credits, 0);
out:
	kref_put(&rdata->refcount,
		 cifs_uncached_readdata_release);

	return rc;
}

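/*
 * Read-side counterpart of cifs_write_from_iter(): split the requested
 * range into rsize-limited chunks and issue an async read for each. For
 * direct I/O the user's pages are pinned and attached to the rdata; for
 * the buffered path pages are allocated here and filled in as responses
 * arrive.
 */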
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	struct iov_iter direct_iov = ctx->iter;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if (ctx->direct_io)
		iov_iter_advance(&direct_iov, offset - ctx->pos);

	do {
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);

		if (ctx->direct_io) {
			ssize_t result;

			result = iov_iter_get_pages_alloc(
					&direct_iov, &pagevec,
					cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					 "couldn't get user pages (cur_len=%zd)"
					 " iter type %d"
					 " iov_offset %zd count %zd\n",
					 result, direct_iov.type,
					 direct_iov.iov_offset,
					 direct_iov.count);
				dump_stack();
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(&direct_iov, cur_len);

			rdata = cifs_readdata_direct_alloc(
					pagevec, cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
			rdata->page_offset = start;
			rdata->tailsz = npages > 1 ?
				cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
				cur_len;

		} else {

			npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
			/* allocate a readdata struct */
			rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			rc = cifs_read_allocate_pages(rdata, npages);
			if (rc)
				goto error;

			rdata->tailsz = PAGE_SIZE;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
		rdata->credits = credits;
		rdata->ctx = ctx;
		kref_get(&ctx->refcount);

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN) {
				iov_iter_revert(&direct_iov, cur_len);
				continue;
			}
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}

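/*
 * Read-side collection routine, the mirror of
 * collect_uncached_write_data(): reap completed rdatas under
 * ctx->aio_mutex, resend on -EAGAIN (continuing after any bytes that did
 * arrive), copy buffered results out to the user iterator, and finally
 * complete the aio request or wake the synchronous waiter.
 */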
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003415static void
3416collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003417{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003418 struct cifs_readdata *rdata, *tmp;
3419 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003420 struct cifs_sb_info *cifs_sb;
3421 struct cifs_tcon *tcon;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003422 unsigned int i;
3423 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003424
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003425 tcon = tlink_tcon(ctx->cfile->tlink);
3426 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003427
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003428 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003429
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003430 if (list_empty(&ctx->list)) {
3431 mutex_unlock(&ctx->aio_mutex);
3432 return;
3433 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003434
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003435 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003436 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003437again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003438 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003439 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003440 if (!try_wait_for_completion(&rdata->done)) {
3441 mutex_unlock(&ctx->aio_mutex);
3442 return;
3443 }
3444
3445 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003446 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003447 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003448 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003449
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003450 list_del_init(&rdata->list);
3451 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003452
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003453 /*
3454 * Got a part of data and then reconnect has
3455 * happened -- fill the buffer and continue
3456 * reading.
3457 */
3458 if (got_bytes && got_bytes < rdata->bytes) {
Long Li6e6e2b82018-10-31 22:13:09 +00003459 rc = 0;
3460 if (!ctx->direct_io)
3461 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003462 if (rc) {
3463 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003464 cifs_uncached_readdata_release);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003465 continue;
3466 }
3467 }
3468
Long Li6e6e2b82018-10-31 22:13:09 +00003469 if (ctx->direct_io) {
3470 /*
3471 * Re-use rdata as this is a
3472 * direct I/O
3473 */
3474 rc = cifs_resend_rdata(
3475 rdata,
3476 &tmp_list, ctx);
3477 } else {
3478 rc = cifs_send_async_read(
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003479 rdata->offset + got_bytes,
3480 rdata->bytes - got_bytes,
3481 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003482 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003483
Long Li6e6e2b82018-10-31 22:13:09 +00003484 kref_put(&rdata->refcount,
3485 cifs_uncached_readdata_release);
3486 }
3487
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003488 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003489
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003490 goto again;
3491 } else if (rdata->result)
3492 rc = rdata->result;
Long Li6e6e2b82018-10-31 22:13:09 +00003493 else if (!ctx->direct_io)
Jeff Layton1c892542012-05-16 07:13:17 -04003494 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003495
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003496 /* if there was a short read -- discard anything left */
3497 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3498 rc = -ENODATA;
Long Li6e6e2b82018-10-31 22:13:09 +00003499
3500 ctx->total_len += rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003501 }
3502 list_del_init(&rdata->list);
3503 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003505
Long Li6e6e2b82018-10-31 22:13:09 +00003506 if (!ctx->direct_io) {
3507 for (i = 0; i < ctx->npages; i++) {
3508 if (ctx->should_dirty)
3509 set_page_dirty(ctx->bv[i].bv_page);
3510 put_page(ctx->bv[i].bv_page);
3511 }
Al Viro7f25bba2014-02-04 14:07:43 -05003512
Long Li6e6e2b82018-10-31 22:13:09 +00003513 ctx->total_len = ctx->len - iov_iter_count(to);
3514 }
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003515
3516 cifs_stats_bytes_read(tcon, ctx->total_len);
Jeff Layton1c892542012-05-16 07:13:17 -04003517
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003518 /* mask nodata case */
3519 if (rc == -ENODATA)
3520 rc = 0;
3521
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003522 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3523
3524 mutex_unlock(&ctx->aio_mutex);
3525
3526 if (ctx->iocb && ctx->iocb->ki_complete)
3527 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3528 else
3529 complete(&ctx->done);
3530}
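/*
 * Illustrative sketch (not fs/cifs code): the sync-vs-async completion
 * pattern used above, rebuilt with POSIX threads. The names io_ctx,
 * finish_read, and the callback signature are invented for the example;
 * in the kernel the same roles are played by cifs_aio_ctx, ki_complete
 * and complete().
 */
#include <pthread.h>
#include <stddef.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

struct io_ctx {
	/* NULL for synchronous callers that wait on `done` instead */
	void (*ki_complete)(struct io_ctx *ctx, long result);
	struct completion done;
	long rc;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Called once all outstanding reads have been collected into ctx. */
static void finish_read(struct io_ctx *ctx)
{
	if (ctx->ki_complete)
		ctx->ki_complete(ctx, ctx->rc);	/* async: notify the submitter */
	else
		complete(&ctx->done);		/* sync: wake the waiting caller */
}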
3531
Long Li6e6e2b82018-10-31 22:13:09 +00003532static ssize_t __cifs_readv(
3533 struct kiocb *iocb, struct iov_iter *to, bool direct)
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003534{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003535 size_t len;
Long Li6e6e2b82018-10-31 22:13:09 +00003536 struct file *file = iocb->ki_filp;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003537 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003538 struct cifsFileInfo *cfile;
Long Li6e6e2b82018-10-31 22:13:09 +00003539 struct cifs_tcon *tcon;
3540 ssize_t rc, total_read = 0;
3541 loff_t offset = iocb->ki_pos;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003542 struct cifs_aio_ctx *ctx;
3543
Long Li6e6e2b82018-10-31 22:13:09 +00003544 /*
3545 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
3546	 * so fall back to the data-copy read path; this could be
3547	 * improved by getting pages directly for ITER_KVEC.
3548 */
3549 if (direct && to->type & ITER_KVEC) {
3550 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3551 direct = false;
3552 }
3553
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003554 len = iov_iter_count(to);
3555 if (!len)
3556 return 0;
3557
3558 cifs_sb = CIFS_FILE_SB(file);
3559 cfile = file->private_data;
3560 tcon = tlink_tcon(cfile->tlink);
3561
3562 if (!tcon->ses->server->ops->async_readv)
3563 return -ENOSYS;
3564
3565 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3566 cifs_dbg(FYI, "attempting read on write only file instance\n");
3567
3568 ctx = cifs_aio_ctx_alloc();
3569 if (!ctx)
3570 return -ENOMEM;
3571
3572 ctx->cfile = cifsFileInfo_get(cfile);
3573
3574 if (!is_sync_kiocb(iocb))
3575 ctx->iocb = iocb;
3576
David Howells00e23702018-10-22 13:07:28 +01003577 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003578 ctx->should_dirty = true;
3579
Long Li6e6e2b82018-10-31 22:13:09 +00003580 if (direct) {
3581 ctx->pos = offset;
3582 ctx->direct_io = true;
3583 ctx->iter = *to;
3584 ctx->len = len;
3585 } else {
3586 rc = setup_aio_ctx_iter(ctx, to, READ);
3587 if (rc) {
3588 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3589 return rc;
3590 }
3591 len = ctx->len;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003592 }
3593
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003594	/* grab a lock here because read response handlers can access ctx */
3595 mutex_lock(&ctx->aio_mutex);
3596
3597 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3598
3599	/* if sending at least one read request succeeded, then reset rc */
3600 if (!list_empty(&ctx->list))
3601 rc = 0;
3602
3603 mutex_unlock(&ctx->aio_mutex);
3604
3605 if (rc) {
3606 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3607 return rc;
3608 }
3609
3610 if (!is_sync_kiocb(iocb)) {
3611 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3612 return -EIOCBQUEUED;
3613 }
3614
3615 rc = wait_for_completion_killable(&ctx->done);
3616 if (rc) {
3617 mutex_lock(&ctx->aio_mutex);
3618 ctx->rc = rc = -EINTR;
3619 total_read = ctx->total_len;
3620 mutex_unlock(&ctx->aio_mutex);
3621 } else {
3622 rc = ctx->rc;
3623 total_read = ctx->total_len;
3624 }
3625
3626 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3627
Al Viro0165e812014-02-04 14:19:48 -05003628 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003629 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003630 return total_read;
3631 }
3632 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003633}
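/*
 * Illustrative sketch (not fs/cifs code): the reference-counting discipline
 * __cifs_readv follows with kref_put() on every early-return path, shown
 * with C11 atomics. struct ctx and the helper names are invented for the
 * example.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
	atomic_int refcount;
	/* ... payload shared with response handlers ... */
};

static struct ctx *ctx_alloc(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (c)
		atomic_init(&c->refcount, 1);	/* caller owns one reference */
	return c;
}

static void ctx_get(struct ctx *c)
{
	atomic_fetch_add(&c->refcount, 1);
}

static void ctx_put(struct ctx *c)
{
	/* free only when the last reference is dropped */
	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		free(c);
}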
3634
Long Li6e6e2b82018-10-31 22:13:09 +00003635ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3636{
3637 return __cifs_readv(iocb, to, true);
3638}
3639
3640ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3641{
3642 return __cifs_readv(iocb, to, false);
3643}
3644
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003645ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003646cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003647{
Al Viro496ad9a2013-01-23 17:07:38 -05003648 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003649 struct cifsInodeInfo *cinode = CIFS_I(inode);
3650 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3651 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3652 iocb->ki_filp->private_data;
3653 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3654 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003655
3656 /*
3657 * In strict cache mode we need to read from the server all the time
3658	 * if we don't have a level II oplock, because the server can delay
3659	 * mtime changes - so we can't decide whether to invalidate the inode.
3660	 * We can also fail reading pages if there are mandatory locks
3661 * on pages affected by this read but not on the region from pos to
3662 * pos+len-1.
3663 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003664 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003665 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003666
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003667 if (cap_unix(tcon->ses) &&
3668 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3669 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003670 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003671
3672 /*
3673	 * We need to hold the semaphore to be sure nobody modifies the lock
3674	 * list with a brlock that prevents reading.
3675 */
3676 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003677 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003678 tcon->ses->server->vals->shared_lock_type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003679 0, NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003680 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003681 up_read(&cinode->lock_sem);
3682 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003683}
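/*
 * Illustrative sketch (not fs/cifs code): the "scan the byte-range lock
 * list under a reader lock before allowing a cached read" idea from
 * cifs_strict_readv, in user-space C. All structure and function names
 * here are invented for the example.
 */
#include <pthread.h>
#include <stdbool.h>
#include <sys/types.h>

struct brlock {
	off_t start;
	off_t len;
	bool exclusive;
	struct brlock *next;
};

struct lock_list {
	pthread_rwlock_t sem;	/* readers scan, writers modify the list */
	struct brlock *head;
};

static bool ranges_overlap(off_t s1, off_t l1, off_t s2, off_t l2)
{
	return s1 < s2 + l2 && s2 < s1 + l1;
}

/* Return true if reading [pos, pos+len) conflicts with an exclusive lock. */
static bool read_conflicts(struct lock_list *ll, off_t pos, off_t len)
{
	struct brlock *b;
	bool conflict = false;

	pthread_rwlock_rdlock(&ll->sem);
	for (b = ll->head; b; b = b->next) {
		if (b->exclusive && ranges_overlap(b->start, b->len, pos, len)) {
			conflict = true;
			break;
		}
	}
	pthread_rwlock_unlock(&ll->sem);
	return conflict;
}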
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003685static ssize_t
3686cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687{
3688 int rc = -EACCES;
3689 unsigned int bytes_read = 0;
3690 unsigned int total_read;
3691 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003692 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003694 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003695 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003696 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003697 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003699 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003700 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003701 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003703 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003704 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003706 /* FIXME: set up handlers for larger reads and/or convert to async */
3707 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3708
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303710 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003711 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303712 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003714 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003715 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003716 server = tcon->ses->server;
3717
3718 if (!server->ops->sync_read) {
3719 free_xid(xid);
3720 return -ENOSYS;
3721 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003723 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3724 pid = open_file->pid;
3725 else
3726 pid = current->tgid;
3727
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003729 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003731 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3732 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003733 do {
3734 current_read_size = min_t(uint, read_size - total_read,
3735 rsize);
3736 /*
3737			 * For Windows ME and 9x we do not want to request more
3738			 * than the server negotiated since it will refuse the
3739			 * read then.
3740 */
3741 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003742 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003743 current_read_size = min_t(uint,
3744 current_read_size, CIFSMaxBufSize);
3745 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003746 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003747 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 if (rc != 0)
3749 break;
3750 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003751 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003752 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003753 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003754 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003755 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003756 &bytes_read, &cur_offset,
3757 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003758 } while (rc == -EAGAIN);
3759
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760 if (rc || (bytes_read == 0)) {
3761 if (total_read) {
3762 break;
3763 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003764 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 return rc;
3766 }
3767 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003768 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003769 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 }
3771 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003772 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773 return total_read;
3774}
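/*
 * Illustrative sketch (not fs/cifs code): the retry-and-advance loop that
 * cifs_read runs against the server, reduced to pread() on a local fd.
 * read_full is an invented name, and a real implementation would bound
 * the EAGAIN retries.
 */
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t read_full(int fd, char *buf, size_t size, off_t off)
{
	size_t total = 0;

	while (total < size) {
		ssize_t n = pread(fd, buf + total, size - total, off + total);

		if (n < 0 && errno == EAGAIN)
			continue;		/* transient: retry this chunk */
		if (n < 0)
			return total ? (ssize_t)total : -1;
		if (n == 0)
			break;			/* EOF */
		total += n;
	}
	return total;
}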
3775
Jeff Laytonca83ce32011-04-12 09:13:44 -04003776/*
3777 * If the page is mmap'ed into a process' page tables, then we need to make
3778 * sure that it doesn't change while being written back.
3779 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05303780static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08003781cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003782{
3783 struct page *page = vmf->page;
3784
3785 lock_page(page);
3786 return VM_FAULT_LOCKED;
3787}
3788
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003789static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003790 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003791 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003792 .page_mkwrite = cifs_page_mkwrite,
3793};
3794
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003795int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3796{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003797 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003798 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003799
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003800 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003801
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003802 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003803 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003804 if (!rc)
3805 rc = generic_file_mmap(file, vma);
3806 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003807 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003808
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003809 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003810 return rc;
3811}
3812
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3814{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815 int rc, xid;
3816
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003817 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003818
Jeff Laytonabab0952010-02-12 07:44:18 -05003819 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003820 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003821 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3822 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003823 if (!rc)
3824 rc = generic_file_mmap(file, vma);
3825 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003826 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003827
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003828 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829 return rc;
3830}
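/*
 * Illustrative sketch (not fs/cifs code): what the mmap paths above enable,
 * seen from user space -- writes through a MAP_SHARED mapping fault pages
 * in (which is where hooks like cifs_page_mkwrite run) and reach the file
 * on msync. The path /tmp/mmap-demo is arbitrary.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/mmap-demo", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	memcpy(p, "hello", 5);		/* write fault: page_mkwrite-style hook fires */
	msync(p, 4096, MS_SYNC);	/* push the dirtied page to the file */
	munmap(p, 4096);
	close(fd);
	return 0;
}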
3831
Jeff Layton0471ca32012-05-16 07:13:16 -04003832static void
3833cifs_readv_complete(struct work_struct *work)
3834{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003835 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003836 struct cifs_readdata *rdata = container_of(work,
3837 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003838
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003839 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003840 for (i = 0; i < rdata->nr_pages; i++) {
3841 struct page *page = rdata->pages[i];
3842
Jeff Layton0471ca32012-05-16 07:13:16 -04003843 lru_cache_add_file(page);
3844
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003845 if (rdata->result == 0 ||
3846 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003847 flush_dcache_page(page);
3848 SetPageUptodate(page);
3849 }
3850
3851 unlock_page(page);
3852
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003853 if (rdata->result == 0 ||
3854 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003855 cifs_readpage_to_fscache(rdata->mapping->host, page);
3856
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003857 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003858
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003859 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003860 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003861 }
Jeff Layton6993f742012-05-16 07:13:17 -04003862 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003863}
3864
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003865static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003866readpages_fill_pages(struct TCP_Server_Info *server,
3867 struct cifs_readdata *rdata, struct iov_iter *iter,
3868 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003869{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003870 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003871 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003872 u64 eof;
3873 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003874 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003875 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003876
3877 /* determine the eof that the server (probably) has */
3878 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003879 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05003880 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003881
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003882 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003883 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003884 for (i = 0; i < nr_pages; i++) {
3885 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07003886 unsigned int to_read = rdata->pagesz;
3887 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003888
Long Li1dbe3462018-05-30 12:47:55 -07003889 if (i == 0)
3890 to_read -= page_offset;
3891 else
3892 page_offset = 0;
3893
3894 n = to_read;
3895
3896 if (len >= to_read) {
3897 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07003898 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003899 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07003900 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05003901 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07003902 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003903 } else if (page->index > eof_index) {
3904 /*
3905 * The VFS will not try to do readahead past the
3906 * i_size, but it's possible that we have outstanding
3907 * writes with gaps in the middle and the i_size hasn't
3908 * caught up yet. Populate those with zeroed out pages
3909 * to prevent the VFS from repeatedly attempting to
3910 * fill them until the writes are flushed.
3911 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003912 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003913 lru_cache_add_file(page);
3914 flush_dcache_page(page);
3915 SetPageUptodate(page);
3916 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003917 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003918 rdata->pages[i] = NULL;
3919 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003920 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003921 } else {
3922 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003923 lru_cache_add_file(page);
3924 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003925 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003926 rdata->pages[i] = NULL;
3927 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003928 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003929 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003930
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003931 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003932 result = copy_page_from_iter(
3933 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003934#ifdef CONFIG_CIFS_SMB_DIRECT
3935 else if (rdata->mr)
3936 result = n;
3937#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003938 else
Long Li1dbe3462018-05-30 12:47:55 -07003939 result = cifs_read_page_from_socket(
3940 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003941 if (result < 0)
3942 break;
3943
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003944 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003945 }
3946
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003947 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3948 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003949}
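/*
 * Illustrative sketch (not fs/cifs code): filling a fixed-size page array
 * from a byte stream and zeroing the tail of a partially filled last page,
 * as readpages_fill_pages does for short reads. fill_pages is an invented
 * helper operating on plain buffers.
 */
#include <stddef.h>
#include <string.h>

static size_t fill_pages(char **pages, unsigned int nr_pages, size_t pagesz,
			 const char *src, size_t len)
{
	size_t copied = 0;
	unsigned int i;

	for (i = 0; i < nr_pages && copied < len; i++) {
		size_t n = len - copied < pagesz ? len - copied : pagesz;

		memcpy(pages[i], src + copied, n);
		if (n < pagesz)	/* partial page: zero the remainder */
			memset(pages[i] + n, 0, pagesz - n);
		copied += n;
	}
	return copied;
}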
3950
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003951static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003952cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3953 struct cifs_readdata *rdata, unsigned int len)
3954{
3955 return readpages_fill_pages(server, rdata, NULL, len);
3956}
3957
3958static int
3959cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
3960 struct cifs_readdata *rdata,
3961 struct iov_iter *iter)
3962{
3963 return readpages_fill_pages(server, rdata, iter, iter->count);
3964}
3965
3966static int
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003967readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3968 unsigned int rsize, struct list_head *tmplist,
3969 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3970{
3971 struct page *page, *tpage;
3972 unsigned int expected_index;
3973 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07003974 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003975
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003976 INIT_LIST_HEAD(tmplist);
3977
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003978 page = list_entry(page_list->prev, struct page, lru);
3979
3980 /*
3981 * Lock the page and put it in the cache. Since no one else
3982 * should have access to this page, we're safe to simply set
3983 * PG_locked without checking it first.
3984 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003985 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003986 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07003987 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003988
3989 /* give up if we can't stick it in the cache */
3990 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003991 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003992 return rc;
3993 }
3994
3995 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003996 *offset = (loff_t)page->index << PAGE_SHIFT;
3997 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003998 *nr_pages = 1;
3999 list_move_tail(&page->lru, tmplist);
4000
4001 /* now try and add more pages onto the request */
4002 expected_index = page->index + 1;
4003 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4004 /* discontinuity ? */
4005 if (page->index != expected_index)
4006 break;
4007
4008 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004009 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004010 break;
4011
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004012 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07004013 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004014 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004015 break;
4016 }
4017 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004018 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004019 expected_index++;
4020 (*nr_pages)++;
4021 }
4022 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023}
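/*
 * Illustrative sketch (not fs/cifs code): the batching rule used by
 * readpages_get_pages -- take consecutive page indexes until the byte
 * budget (rsize) would be exceeded or a discontinuity appears. pick_run
 * is an invented helper over a sorted index array.
 */
#include <stddef.h>

static size_t pick_run(const unsigned long *indexes, size_t count,
		       size_t pagesz, size_t budget)
{
	size_t n = 0;
	size_t bytes = 0;

	while (n < count && bytes + pagesz <= budget) {
		if (n && indexes[n] != indexes[n - 1] + 1)
			break;	/* discontinuity ends the run */
		bytes += pagesz;
		n++;
	}
	return n;
}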
4024
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025static int cifs_readpages(struct file *file, struct address_space *mapping,
4026 struct list_head *page_list, unsigned num_pages)
4027{
Jeff Layton690c5e32011-10-19 15:30:16 -04004028 int rc;
4029 struct list_head tmplist;
4030 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04004031 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004032 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004033 pid_t pid;
Steve French0cb012d2018-10-11 01:01:02 -05004034 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035
Steve French0cb012d2018-10-11 01:01:02 -05004036 xid = get_xid();
Jeff Layton690c5e32011-10-19 15:30:16 -04004037 /*
Suresh Jayaraman566982362010-07-05 18:13:25 +05304038 * Reads as many pages as possible from fscache. Returns -ENOBUFS
4039 * immediately if the cookie is negative
David Howells54afa992013-09-04 17:10:39 +00004040 *
4041 * After this point, every page in the list might have PG_fscache set,
4042 * so we will need to clean that up off of every page we don't use.
Suresh Jayaraman566982362010-07-05 18:13:25 +05304043 */
4044 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4045 &num_pages);
Steve French0cb012d2018-10-11 01:01:02 -05004046 if (rc == 0) {
4047 free_xid(xid);
Jeff Layton690c5e32011-10-19 15:30:16 -04004048 return rc;
Steve French0cb012d2018-10-11 01:01:02 -05004049 }
Suresh Jayaraman566982362010-07-05 18:13:25 +05304050
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004051 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4052 pid = open_file->pid;
4053 else
4054 pid = current->tgid;
4055
Jeff Layton690c5e32011-10-19 15:30:16 -04004056 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004057 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058
Joe Perchesf96637b2013-05-04 22:12:25 -05004059 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4060 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04004061
4062 /*
4063 * Start with the page at end of list and move it to private
4064 * list. Do the same with any following pages until we hit
4065 * the rsize limit, hit an index discontinuity, or run out of
4066 * pages. Issue the async read and then start the loop again
4067 * until the list is empty.
4068 *
4069 * Note that list order is important. The page_list is in
4070 * the order of declining indexes. When we put the pages in
4071 * the rdata->pages, then we want them in increasing order.
4072 */
4073 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004074 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04004075 loff_t offset;
4076 struct page *page, *tpage;
4077 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004078 unsigned credits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004080 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
4081 &rsize, &credits);
4082 if (rc)
4083 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084
Jeff Layton690c5e32011-10-19 15:30:16 -04004085 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004086 * Give up immediately if rsize is too small to read an entire
4087 * page. The VFS will fall back to readpage. We should never
4088 * reach this point however since we set ra_pages to 0 when the
4089 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04004090 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004091 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004092 add_credits_and_wake_if(server, credits, 0);
Steve French0cb012d2018-10-11 01:01:02 -05004093 free_xid(xid);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004094 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004097 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
4098 &nr_pages, &offset, &bytes);
4099 if (rc) {
4100 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04004102 }
4103
Jeff Layton0471ca32012-05-16 07:13:16 -04004104 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04004105 if (!rdata) {
4106 /* best to give up if we're out of mem */
4107 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4108 list_del(&page->lru);
4109 lru_cache_add_file(page);
4110 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004111 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04004112 }
4113 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004114 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04004115 break;
4116 }
4117
Jeff Layton6993f742012-05-16 07:13:17 -04004118 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04004119 rdata->mapping = mapping;
4120 rdata->offset = offset;
4121 rdata->bytes = bytes;
4122 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004123 rdata->pagesz = PAGE_SIZE;
Long Li1dbe3462018-05-30 12:47:55 -07004124 rdata->tailsz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07004125 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004126 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004127 rdata->credits = credits;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004128
4129 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4130 list_del(&page->lru);
4131 rdata->pages[rdata->nr_pages++] = page;
4132 }
Jeff Layton690c5e32011-10-19 15:30:16 -04004133
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004134 if (!rdata->cfile->invalidHandle ||
Germano Percossi1fa839b2017-04-07 12:29:38 +01004135 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004136 rc = server->ops->async_readv(rdata);
4137 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004138 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004139 for (i = 0; i < rdata->nr_pages; i++) {
4140 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04004141 lru_cache_add_file(page);
4142 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004143 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04004145			/* Fall back to readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04004146 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147 break;
4148 }
Jeff Layton6993f742012-05-16 07:13:17 -04004149
4150 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151 }
4152
David Howells54afa992013-09-04 17:10:39 +00004153 /* Any pages that have been shown to fscache but didn't get added to
4154 * the pagecache must be uncached before they get returned to the
4155 * allocator.
4156 */
4157 cifs_fscache_readpages_cancel(mapping->host, page_list);
Steve French0cb012d2018-10-11 01:01:02 -05004158 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159 return rc;
4160}
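/*
 * Illustrative sketch (not fs/cifs code): the credit throttling that
 * wait_mtu_credits/add_credits_and_wake_if provide in the loop above,
 * reduced to a counting semaphore. The helper names are invented;
 * initialize with sem_init(&credits, 0, max_in_flight) before use.
 */
#include <semaphore.h>

static void take_credits(sem_t *credits, unsigned int n)
{
	while (n--)
		sem_wait(credits);	/* block until a credit is available */
}

static void return_credits(sem_t *credits, unsigned int n)
{
	while (n--)
		sem_post(credits);	/* give credits back on completion/error */
}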
4161
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01004162/*
4163 * cifs_readpage_worker must be called with the page pinned
4164 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165static int cifs_readpage_worker(struct file *file, struct page *page,
4166 loff_t *poffset)
4167{
4168 char *read_data;
4169 int rc;
4170
Suresh Jayaraman566982362010-07-05 18:13:25 +05304171 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05004172 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304173 if (rc == 0)
4174 goto read_complete;
4175
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176 read_data = kmap(page);
4177	/* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004178
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004179 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004180
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181 if (rc < 0)
4182 goto io_error;
4183 else
Joe Perchesf96637b2013-05-04 22:12:25 -05004184 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004185
Steve French9b9c5be2018-09-22 12:07:06 -05004186	/* we do not want atime to be less than mtime; that broke some apps */
4187 file_inode(file)->i_atime = current_time(file_inode(file));
4188 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4189 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4190 else
4191 file_inode(file)->i_atime = current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004192
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004193 if (PAGE_SIZE > rc)
4194 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195
4196 flush_dcache_page(page);
4197 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304198
4199 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05004200 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304201
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004203
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004205 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004206 unlock_page(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304207
4208read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209 return rc;
4210}
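/*
 * Illustrative sketch (not fs/cifs code): the intent of the atime fixup in
 * cifs_readpage_worker -- never let atime fall behind mtime. clamp_atime
 * is an invented helper over plain struct timespec.
 */
#include <time.h>

static struct timespec clamp_atime(struct timespec now, struct timespec mtime)
{
	if (now.tv_sec < mtime.tv_sec ||
	    (now.tv_sec == mtime.tv_sec && now.tv_nsec < mtime.tv_nsec))
		return mtime;	/* atime would lag mtime: pin it to mtime */
	return now;
}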
4211
4212static int cifs_readpage(struct file *file, struct page *page)
4213{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004214 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004216 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004218 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219
4220 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304221 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004222 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304223 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 }
4225
Joe Perchesf96637b2013-05-04 22:12:25 -05004226 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00004227 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228
4229 rc = cifs_readpage_worker(file, page, &offset);
4230
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004231 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232 return rc;
4233}
4234
Steve Frencha403a0a2007-07-26 15:54:16 +00004235static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4236{
4237 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05004238 struct cifs_tcon *tcon =
4239 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00004240
Steve French3afca262016-09-22 18:58:16 -05004241 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004242 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004243 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05004244 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004245 return 1;
4246 }
4247 }
Steve French3afca262016-09-22 18:58:16 -05004248 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004249 return 0;
4250}
4251
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252/* We do not want to update the file size from the server for inodes
4253   open for write, to avoid races with writepage extending
4254   the file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004255   refreshing the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256   but this is tricky to do without racing with writebehind
4257   page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004258bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259{
Steve Frencha403a0a2007-07-26 15:54:16 +00004260 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004261 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004262
Steve Frencha403a0a2007-07-26 15:54:16 +00004263 if (is_inode_writable(cifsInode)) {
4264 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004265 struct cifs_sb_info *cifs_sb;
4266
Steve Frenchc32a0b62006-01-12 14:41:28 -08004267 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004268 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004269			/* since there is no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004270			   we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004271 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004272 }
4273
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004274 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004275 return true;
Steve French7ba526312007-02-08 18:14:13 +00004276
Steve French4b18f2a2008-04-29 00:06:05 +00004277 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004278 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004279 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280}
4281
Nick Piggind9414772008-09-24 11:32:59 -04004282static int cifs_write_begin(struct file *file, struct address_space *mapping,
4283 loff_t pos, unsigned len, unsigned flags,
4284 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285{
Sachin Prabhu466bd312013-09-13 14:11:57 +01004286 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004287 pgoff_t index = pos >> PAGE_SHIFT;
4288 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004289 loff_t page_start = pos & PAGE_MASK;
4290 loff_t i_size;
4291 struct page *page;
4292 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293
Joe Perchesf96637b2013-05-04 22:12:25 -05004294 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04004295
Sachin Prabhu466bd312013-09-13 14:11:57 +01004296start:
Nick Piggin54566b22009-01-04 12:00:53 -08004297 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004298 if (!page) {
4299 rc = -ENOMEM;
4300 goto out;
4301 }
Nick Piggind9414772008-09-24 11:32:59 -04004302
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004303 if (PageUptodate(page))
4304 goto out;
Steve French8a236262007-03-06 00:31:00 +00004305
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004306 /*
4307 * If we write a full page it will be up to date, no need to read from
4308 * the server. If the write is short, we'll end up doing a sync write
4309 * instead.
4310 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004311 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004312 goto out;
4313
4314 /*
4315 * optimize away the read when we have an oplock, and we're not
4316 * expecting to use any of the data we'd be reading in. That
4317 * is, when the page lies beyond the EOF, or straddles the EOF
4318 * and the write will cover all of the existing data.
4319 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004320 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004321 i_size = i_size_read(mapping->host);
4322 if (page_start >= i_size ||
4323 (offset == 0 && (pos + len) >= i_size)) {
4324 zero_user_segments(page, 0, offset,
4325 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004326 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004327 /*
4328 * PageChecked means that the parts of the page
4329 * to which we're not writing are considered up
4330 * to date. Once the data is copied to the
4331 * page, it can be set uptodate.
4332 */
4333 SetPageChecked(page);
4334 goto out;
4335 }
4336 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337
Sachin Prabhu466bd312013-09-13 14:11:57 +01004338 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004339 /*
4340 * might as well read a page, it is fast enough. If we get
4341 * an error, we don't need to return it. cifs_write_end will
4342 * do a sync write instead since PG_uptodate isn't set.
4343 */
4344 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004345 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004346 oncethru = 1;
4347 goto start;
Steve French8a236262007-03-06 00:31:00 +00004348 } else {
4349 /* we could try using another file handle if there is one -
4350 but how would we lock it to prevent close of that handle
4351 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04004352		   this will be written out by write_end so this is fine */
Steve French8a236262007-03-06 00:31:00 +00004353 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004354out:
4355 *pagep = page;
4356 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357}
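/*
 * Illustrative sketch (not fs/cifs code): the read-modify-write decision
 * cifs_write_begin makes -- skip the pre-read when the write covers the
 * whole page, when the page lies at/past EOF, or when a write starting at
 * the page head reaches EOF. need_preread is an invented predicate, and
 * pagesz is assumed to be a power of two.
 */
#include <stdbool.h>
#include <sys/types.h>

static bool need_preread(off_t pos, size_t len, off_t i_size, size_t pagesz)
{
	off_t page_start = pos & ~((off_t)pagesz - 1);
	size_t offset = pos - page_start;

	if (len == pagesz)
		return false;		/* full-page overwrite */
	if (page_start >= i_size)
		return false;		/* page entirely beyond EOF */
	if (offset == 0 && pos + (off_t)len >= i_size)
		return false;		/* write covers all existing data */
	return true;
}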
4358
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304359static int cifs_release_page(struct page *page, gfp_t gfp)
4360{
4361 if (PagePrivate(page))
4362 return 0;
4363
4364 return cifs_fscache_release_page(page, gfp);
4365}
4366
Lukas Czernerd47992f2013-05-21 23:17:23 -04004367static void cifs_invalidate_page(struct page *page, unsigned int offset,
4368 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304369{
4370 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4371
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004372 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304373 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4374}
4375
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004376static int cifs_launder_page(struct page *page)
4377{
4378 int rc = 0;
4379 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004380 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004381 struct writeback_control wbc = {
4382 .sync_mode = WB_SYNC_ALL,
4383 .nr_to_write = 0,
4384 .range_start = range_start,
4385 .range_end = range_end,
4386 };
4387
Joe Perchesf96637b2013-05-04 22:12:25 -05004388 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004389
4390 if (clear_page_dirty_for_io(page))
4391 rc = cifs_writepage_locked(page, &wbc);
4392
4393 cifs_fscache_invalidate_page(page, page->mapping->host);
4394 return rc;
4395}
4396
Tejun Heo9b646972010-07-20 22:09:02 +02004397void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04004398{
4399 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4400 oplock_break);
David Howells2b0143b2015-03-17 22:25:59 +00004401 struct inode *inode = d_inode(cfile->dentry);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004402 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004403 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004404 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004405 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04004406
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004407 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
NeilBrown74316202014-07-07 15:16:04 +10004408 TASK_UNINTERRUPTIBLE);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004409
4410 server->ops->downgrade_oplock(server, cinode,
4411 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
4412
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004413 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004414 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05004415 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4416 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004417 cinode->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004418 }
4419
Jeff Layton3bc303c2009-09-21 06:47:50 -04004420 if (inode && S_ISREG(inode->i_mode)) {
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004421 if (CIFS_CACHE_READ(cinode))
Al Viro8737c932009-12-24 06:47:55 -05004422 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00004423 else
Al Viro8737c932009-12-24 06:47:55 -05004424 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004425 rc = filemap_fdatawrite(inode->i_mapping);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004426 if (!CIFS_CACHE_READ(cinode)) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004427 rc = filemap_fdatawait(inode->i_mapping);
4428 mapping_set_error(inode->i_mapping, rc);
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004429 cifs_zap_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004430 }
Joe Perchesf96637b2013-05-04 22:12:25 -05004431 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004432 }
4433
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004434 rc = cifs_push_locks(cfile);
4435 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004436 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004437
Jeff Layton3bc303c2009-09-21 06:47:50 -04004438 /*
4439	 * Releasing a stale oplock after a recent reconnect of the SMB session,
4440	 * using a now incorrect file handle, is not a data integrity issue, but
4441	 * do not bother sending an oplock release if the session to the server
4442	 * is still disconnected, since the oplock was already released by the server.
4443 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00004444 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004445 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4446 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05004447 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004448 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004449 cifs_done_oplock_break(cinode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004450}
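/*
 * Illustrative sketch (not fs/cifs code): the same break protocol from the
 * holder's seat, using Linux file leases -- the closest user-space analogue
 * of an oplock. On a conflicting open() the holder receives SIGIO, flushes
 * its private state, and releases the lease. /tmp/lease-demo is arbitrary
 * and must be owned by the caller (or the process needs CAP_LEASE).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int leased_fd = -1;

static void lease_break(int sig)
{
	(void)sig;
	/* flush cached state here, then release (or downgrade) the lease */
	fcntl(leased_fd, F_SETLEASE, F_UNLCK);
}

int main(void)
{
	leased_fd = open("/tmp/lease-demo", O_RDONLY);
	if (leased_fd < 0)
		return 1;

	signal(SIGIO, lease_break);
	if (fcntl(leased_fd, F_SETLEASE, F_RDLCK) < 0)
		return 1;

	pause();	/* a conflicting open() elsewhere triggers SIGIO */
	return 0;
}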
4451
Steve Frenchdca69282013-11-11 16:42:37 -06004452/*
4453 * The presence of cifs_direct_io() in the address space ops vector
4454 * allows open() O_DIRECT flags which would have failed otherwise.
4455 *
4456 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4457 * so this method should never be called.
4458 *
4459 * Direct IO is not yet supported in the cached mode.
4460 */
4461static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004462cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004463{
4464 /*
4465 * FIXME
4466	 * Eventually we need to support direct IO for non-forcedirectio mounts
4467 */
4468 return -EINVAL;
4469}
4470
4471
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004472const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473 .readpage = cifs_readpage,
4474 .readpages = cifs_readpages,
4475 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07004476 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004477 .write_begin = cifs_write_begin,
4478 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304480 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06004481 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304482 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004483 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004485
4486/*
4487 * cifs_readpages requires the server to support a buffer large enough to
4488 * contain the header plus one complete page of data. Otherwise, we need
4489 * to leave cifs_readpages out of the address space operations.
4490 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004491const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004492 .readpage = cifs_readpage,
4493 .writepage = cifs_writepage,
4494 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004495 .write_begin = cifs_write_begin,
4496 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004497 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304498 .releasepage = cifs_release_page,
4499 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004500 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004501};