blob: e7af802dcfa600293aa6bf58539dd769892ac6ff [file] [log] [blame]
Steve French929be902021-06-18 00:31:49 -05001// SPDX-License-Identifier: LGPL-2.1
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070010 */
11#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070012#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/stat.h>
14#include <linux/fcntl.h>
15#include <linux/pagemap.h>
16#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070017#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080018#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070019#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040020#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090021#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040022#include <linux/swap.h>
Nikolay Borisovf86196e2019-01-03 15:29:02 -080023#include <linux/mm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <asm/div64.h>
25#include "cifsfs.h"
26#include "cifspdu.h"
27#include "cifsglob.h"
28#include "cifsproto.h"
29#include "cifs_unicode.h"
30#include "cifs_debug.h"
31#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053032#include "fscache.h"
Long Libd3dcc62017-11-22 17:38:47 -070033#include "smbdirect.h"
Ronnie Sahlberg8401e932020-12-12 13:40:50 -060034#include "fs_context.h"
Steve French087f7572021-04-29 00:18:43 -050035#include "cifs_ioctl.h"
Steve French07b92d02013-02-18 10:34:26 -060036
Linus Torvalds1da177e2005-04-16 15:20:36 -070037static inline int cifs_convert_flags(unsigned int flags)
38{
39 if ((flags & O_ACCMODE) == O_RDONLY)
40 return GENERIC_READ;
41 else if ((flags & O_ACCMODE) == O_WRONLY)
42 return GENERIC_WRITE;
43 else if ((flags & O_ACCMODE) == O_RDWR) {
44 /* GENERIC_ALL is too much permission to request
45 can cause unnecessary access denied on create */
46 /* return GENERIC_ALL; */
47 return (GENERIC_READ | GENERIC_WRITE);
48 }
49
Jeff Laytone10f7b52008-05-14 10:21:33 -070050 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
51 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
52 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000053}
Jeff Laytone10f7b52008-05-14 10:21:33 -070054
Jeff Layton608712f2010-10-15 15:33:56 -040055static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000056{
Jeff Layton608712f2010-10-15 15:33:56 -040057 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070058
Steve French7fc8f4e2009-02-23 20:43:11 +000059 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040060 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000061 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040062 posix_flags = SMB_O_WRONLY;
63 else if ((flags & O_ACCMODE) == O_RDWR)
64 posix_flags = SMB_O_RDWR;
65
Steve French07b92d02013-02-18 10:34:26 -060066 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040067 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060068 if (flags & O_EXCL)
69 posix_flags |= SMB_O_EXCL;
70 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050071 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
72 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060073
Jeff Layton608712f2010-10-15 15:33:56 -040074 if (flags & O_TRUNC)
75 posix_flags |= SMB_O_TRUNC;
76 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010077 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040078 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000079 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040080 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000081 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040082 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000083 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040084 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000085
86 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070087}
88
89static inline int cifs_get_disposition(unsigned int flags)
90{
91 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
92 return FILE_CREATE;
93 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
94 return FILE_OVERWRITE_IF;
95 else if ((flags & O_CREAT) == O_CREAT)
96 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +000097 else if ((flags & O_TRUNC) == O_TRUNC)
98 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070099 else
100 return FILE_OPEN;
101}
102
Al Virof6f1f172021-03-18 15:44:05 -0400103int cifs_posix_open(const char *full_path, struct inode **pinode,
Jeff Layton608712f2010-10-15 15:33:56 -0400104 struct super_block *sb, int mode, unsigned int f_flags,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400105 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
Jeff Layton608712f2010-10-15 15:33:56 -0400106{
107 int rc;
108 FILE_UNIX_BASIC_INFO *presp_data;
109 __u32 posix_flags = 0;
110 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
111 struct cifs_fattr fattr;
112 struct tcon_link *tlink;
Steve French96daf2b2011-05-27 04:34:02 +0000113 struct cifs_tcon *tcon;
Jeff Layton608712f2010-10-15 15:33:56 -0400114
Joe Perchesf96637b2013-05-04 22:12:25 -0500115 cifs_dbg(FYI, "posix open %s\n", full_path);
Jeff Layton608712f2010-10-15 15:33:56 -0400116
117 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
118 if (presp_data == NULL)
119 return -ENOMEM;
120
121 tlink = cifs_sb_tlink(cifs_sb);
122 if (IS_ERR(tlink)) {
123 rc = PTR_ERR(tlink);
124 goto posix_open_ret;
125 }
126
127 tcon = tlink_tcon(tlink);
128 mode &= ~current_umask();
129
130 posix_flags = cifs_posix_convert_flags(f_flags);
131 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
132 poplock, full_path, cifs_sb->local_nls,
Nakajima Akirabc8ebdc42015-02-13 15:35:58 +0900133 cifs_remap(cifs_sb));
Jeff Layton608712f2010-10-15 15:33:56 -0400134 cifs_put_tlink(tlink);
135
136 if (rc)
137 goto posix_open_ret;
138
139 if (presp_data->Type == cpu_to_le32(-1))
140 goto posix_open_ret; /* open ok, caller does qpathinfo */
141
142 if (!pinode)
143 goto posix_open_ret; /* caller does not need info */
144
145 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
146
147 /* get new inode and set it up */
148 if (*pinode == NULL) {
149 cifs_fill_uniqueid(sb, &fattr);
150 *pinode = cifs_iget(sb, &fattr);
151 if (!*pinode) {
152 rc = -ENOMEM;
153 goto posix_open_ret;
154 }
155 } else {
Ronnie Sahlbergcee8f4f2021-03-25 16:26:35 +1000156 cifs_revalidate_mapping(*pinode);
Al Viro4d669522021-02-10 21:23:04 -0500157 rc = cifs_fattr_to_inode(*pinode, &fattr);
Jeff Layton608712f2010-10-15 15:33:56 -0400158 }
159
160posix_open_ret:
161 kfree(presp_data);
162 return rc;
163}
164
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300165static int
Al Virof6f1f172021-03-18 15:44:05 -0400166cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700167 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
168 struct cifs_fid *fid, unsigned int xid)
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300169{
170 int rc;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700171 int desired_access;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300172 int disposition;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500173 int create_options = CREATE_NOT_DIR;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300174 FILE_ALL_INFO *buf;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700175 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400176 struct cifs_open_parms oparms;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300177
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700178 if (!server->ops->open)
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700179 return -ENOSYS;
180
181 desired_access = cifs_convert_flags(f_flags);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300182
183/*********************************************************************
184 * open flag mapping table:
185 *
186 * POSIX Flag CIFS Disposition
187 * ---------- ----------------
188 * O_CREAT FILE_OPEN_IF
189 * O_CREAT | O_EXCL FILE_CREATE
190 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
191 * O_TRUNC FILE_OVERWRITE
192 * none of the above FILE_OPEN
193 *
194 * Note that there is not a direct match between disposition
195 * FILE_SUPERSEDE (ie create whether or not file exists although
196 * O_CREAT | O_TRUNC is similar but truncates the existing
197 * file rather than creating a new file as FILE_SUPERSEDE does
198 * (which uses the attributes / metadata passed in on open call)
199 *?
200 *? O_SYNC is a reasonable match to CIFS writethrough flag
201 *? and the read write flags match reasonably. O_LARGEFILE
202 *? is irrelevant because largefile support is always used
203 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
204 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
205 *********************************************************************/
206
207 disposition = cifs_get_disposition(f_flags);
208
209 /* BB pass O_SYNC flag through on file attributes .. BB */
210
211 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
212 if (!buf)
213 return -ENOMEM;
214
Steve French1013e762017-09-22 01:40:27 -0500215 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
216 if (f_flags & O_SYNC)
217 create_options |= CREATE_WRITE_THROUGH;
218
219 if (f_flags & O_DIRECT)
220 create_options |= CREATE_NO_BUFFER;
221
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400222 oparms.tcon = tcon;
223 oparms.cifs_sb = cifs_sb;
224 oparms.desired_access = desired_access;
Amir Goldstein0f060932020-02-03 21:46:43 +0200225 oparms.create_options = cifs_create_options(cifs_sb, create_options);
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400226 oparms.disposition = disposition;
227 oparms.path = full_path;
228 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400229 oparms.reconnect = false;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400230
231 rc = server->ops->open(xid, &oparms, oplock, buf);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300232
233 if (rc)
234 goto out;
235
Steve Frenchd3138522020-06-11 22:43:01 -0500236 /* TODO: Add support for calling posix query info but with passing in fid */
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300237 if (tcon->unix_ext)
238 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
239 xid);
240 else
241 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
Steve French42eacf92014-02-10 14:08:16 -0600242 xid, fid);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300243
Pavel Shilovsky30573a822019-09-30 10:06:18 -0700244 if (rc) {
245 server->ops->close(xid, tcon, fid);
246 if (rc == -ESTALE)
247 rc = -EOPENSTALE;
248 }
249
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300250out:
251 kfree(buf);
252 return rc;
253}
254
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400255static bool
256cifs_has_mand_locks(struct cifsInodeInfo *cinode)
257{
258 struct cifs_fid_locks *cur;
259 bool has_locks = false;
260
261 down_read(&cinode->lock_sem);
262 list_for_each_entry(cur, &cinode->llist, llist) {
263 if (!list_empty(&cur->locks)) {
264 has_locks = true;
265 break;
266 }
267 }
268 up_read(&cinode->lock_sem);
269 return has_locks;
270}
271
Dave Wysochanskid46b0da2019-10-23 05:02:33 -0400272void
273cifs_down_write(struct rw_semaphore *sem)
274{
275 while (!down_write_trylock(sem))
276 msleep(10);
277}
278
Ronnie Sahlberg32546a92019-11-03 13:06:37 +1000279static void cifsFileInfo_put_work(struct work_struct *work);
280
Jeff Layton15ecb432010-10-15 15:34:02 -0400281struct cifsFileInfo *
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700282cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
Jeff Layton15ecb432010-10-15 15:34:02 -0400283 struct tcon_link *tlink, __u32 oplock)
284{
Goldwyn Rodrigues1f1735c2016-04-18 06:41:52 -0500285 struct dentry *dentry = file_dentry(file);
David Howells2b0143b2015-03-17 22:25:59 +0000286 struct inode *inode = d_inode(dentry);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700287 struct cifsInodeInfo *cinode = CIFS_I(inode);
288 struct cifsFileInfo *cfile;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700289 struct cifs_fid_locks *fdlocks;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700290 struct cifs_tcon *tcon = tlink_tcon(tlink);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400291 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Layton15ecb432010-10-15 15:34:02 -0400292
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700293 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
294 if (cfile == NULL)
295 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400296
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700297 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
298 if (!fdlocks) {
299 kfree(cfile);
300 return NULL;
301 }
302
303 INIT_LIST_HEAD(&fdlocks->locks);
304 fdlocks->cfile = cfile;
305 cfile->llist = fdlocks;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700306
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700307 cfile->count = 1;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700308 cfile->pid = current->tgid;
309 cfile->uid = current_fsuid();
310 cfile->dentry = dget(dentry);
311 cfile->f_flags = file->f_flags;
312 cfile->invalidHandle = false;
Rohith Surabattula860b69a2021-05-05 10:56:47 +0000313 cfile->deferred_close_scheduled = false;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700314 cfile->tlink = cifs_get_tlink(tlink);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700315 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
Ronnie Sahlberg32546a92019-11-03 13:06:37 +1000316 INIT_WORK(&cfile->put, cifsFileInfo_put_work);
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500317 INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700318 mutex_init(&cfile->fh_mutex);
Steve French3afca262016-09-22 18:58:16 -0500319 spin_lock_init(&cfile->file_info_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400320
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100321 cifs_sb_active(inode->i_sb);
322
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400323 /*
324 * If the server returned a read oplock and we have mandatory brlocks,
325 * set oplock level to None.
326 */
Pavel Shilovsky53ef1012013-09-05 16:11:28 +0400327 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500328 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400329 oplock = 0;
330 }
331
Pavel Shilovsky6f582b22019-11-27 16:18:39 -0800332 cifs_down_write(&cinode->lock_sem);
333 list_add(&fdlocks->llist, &cinode->llist);
334 up_write(&cinode->lock_sem);
335
Steve French3afca262016-09-22 18:58:16 -0500336 spin_lock(&tcon->open_file_lock);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400337 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700338 oplock = fid->pending_open->oplock;
339 list_del(&fid->pending_open->olist);
340
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400341 fid->purge_cache = false;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400342 server->ops->set_fid(cfile, fid, oplock);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700343
344 list_add(&cfile->tlist, &tcon->openFileList);
Steve Frenchfae80442018-10-19 17:14:32 -0500345 atomic_inc(&tcon->num_local_opens);
Steve French3afca262016-09-22 18:58:16 -0500346
Jeff Layton15ecb432010-10-15 15:34:02 -0400347 /* if readable file instance put first in list*/
Ronnie Sahlberg487317c2019-06-05 10:38:38 +1000348 spin_lock(&cinode->open_file_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400349 if (file->f_mode & FMODE_READ)
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700350 list_add(&cfile->flist, &cinode->openFileList);
Jeff Layton15ecb432010-10-15 15:34:02 -0400351 else
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700352 list_add_tail(&cfile->flist, &cinode->openFileList);
Ronnie Sahlberg487317c2019-06-05 10:38:38 +1000353 spin_unlock(&cinode->open_file_lock);
Steve French3afca262016-09-22 18:58:16 -0500354 spin_unlock(&tcon->open_file_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400355
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400356 if (fid->purge_cache)
Jeff Layton4f73c7d2014-04-30 09:31:47 -0400357 cifs_zap_mapping(inode);
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400358
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700359 file->private_data = cfile;
360 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400361}
362
Jeff Layton764a1b12012-07-25 14:59:54 -0400363struct cifsFileInfo *
364cifsFileInfo_get(struct cifsFileInfo *cifs_file)
365{
Steve French3afca262016-09-22 18:58:16 -0500366 spin_lock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400367 cifsFileInfo_get_locked(cifs_file);
Steve French3afca262016-09-22 18:58:16 -0500368 spin_unlock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400369 return cifs_file;
370}
371
Ronnie Sahlberg32546a92019-11-03 13:06:37 +1000372static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
373{
374 struct inode *inode = d_inode(cifs_file->dentry);
375 struct cifsInodeInfo *cifsi = CIFS_I(inode);
376 struct cifsLockInfo *li, *tmp;
377 struct super_block *sb = inode->i_sb;
378
379 /*
380 * Delete any outstanding lock records. We'll lose them when the file
381 * is closed anyway.
382 */
383 cifs_down_write(&cifsi->lock_sem);
384 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
385 list_del(&li->llist);
386 cifs_del_lock_waiters(li);
387 kfree(li);
388 }
389 list_del(&cifs_file->llist->llist);
390 kfree(cifs_file->llist);
391 up_write(&cifsi->lock_sem);
392
393 cifs_put_tlink(cifs_file->tlink);
394 dput(cifs_file->dentry);
395 cifs_sb_deactive(sb);
396 kfree(cifs_file);
397}
398
399static void cifsFileInfo_put_work(struct work_struct *work)
400{
401 struct cifsFileInfo *cifs_file = container_of(work,
402 struct cifsFileInfo, put);
403
404 cifsFileInfo_put_final(cifs_file);
405}
406
Aurelien Aptelb98749c2019-03-29 10:49:12 +0100407/**
408 * cifsFileInfo_put - release a reference of file priv data
409 *
410 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
Steve French607dfc72020-12-12 12:08:58 -0600411 *
412 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
Steve Frenchcdff08e2010-10-21 22:46:14 +0000413 */
Jeff Laytonb33879a2010-10-15 15:34:04 -0400414void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
415{
Ronnie Sahlberg32546a92019-11-03 13:06:37 +1000416 _cifsFileInfo_put(cifs_file, true, true);
Aurelien Aptelb98749c2019-03-29 10:49:12 +0100417}
418
419/**
420 * _cifsFileInfo_put - release a reference of file priv data
421 *
422 * This may involve closing the filehandle @cifs_file out on the
Ronnie Sahlberg32546a92019-11-03 13:06:37 +1000423 * server. Must be called without holding tcon->open_file_lock,
424 * cinode->open_file_lock and cifs_file->file_info_lock.
Aurelien Aptelb98749c2019-03-29 10:49:12 +0100425 *
426 * If @wait_for_oplock_handler is true and we are releasing the last
427 * reference, wait for any running oplock break handler of the file
Steve French607dfc72020-12-12 12:08:58 -0600428 * and cancel any pending one.
429 *
430 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
431 * @wait_oplock_handler: must be false if called from oplock_break_handler
432 * @offload: not offloaded on close and oplock breaks
Aurelien Aptelb98749c2019-03-29 10:49:12 +0100433 *
434 */
Ronnie Sahlberg32546a92019-11-03 13:06:37 +1000435void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
436 bool wait_oplock_handler, bool offload)
Aurelien Aptelb98749c2019-03-29 10:49:12 +0100437{
David Howells2b0143b2015-03-17 22:25:59 +0000438 struct inode *inode = d_inode(cifs_file->dentry);
Steve French96daf2b2011-05-27 04:34:02 +0000439 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700440 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300441 struct cifsInodeInfo *cifsi = CIFS_I(inode);
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100442 struct super_block *sb = inode->i_sb;
443 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700444 struct cifs_fid fid;
445 struct cifs_pending_open open;
Sachin Prabhuca7df8e2015-01-15 12:22:04 +0000446 bool oplock_break_cancelled;
Steve Frenchcdff08e2010-10-21 22:46:14 +0000447
Steve French3afca262016-09-22 18:58:16 -0500448 spin_lock(&tcon->open_file_lock);
Pavel Shilovsky1a67c412019-10-23 15:37:19 -0700449 spin_lock(&cifsi->open_file_lock);
Steve French3afca262016-09-22 18:58:16 -0500450 spin_lock(&cifs_file->file_info_lock);
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400451 if (--cifs_file->count > 0) {
Steve French3afca262016-09-22 18:58:16 -0500452 spin_unlock(&cifs_file->file_info_lock);
Pavel Shilovsky1a67c412019-10-23 15:37:19 -0700453 spin_unlock(&cifsi->open_file_lock);
Steve French3afca262016-09-22 18:58:16 -0500454 spin_unlock(&tcon->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000455 return;
Jeff Laytonb33879a2010-10-15 15:34:04 -0400456 }
Steve French3afca262016-09-22 18:58:16 -0500457 spin_unlock(&cifs_file->file_info_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000458
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700459 if (server->ops->get_lease_key)
460 server->ops->get_lease_key(inode, &fid);
461
462 /* store open in pending opens to make sure we don't miss lease break */
463 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
464
Steve Frenchcdff08e2010-10-21 22:46:14 +0000465 /* remove it from the lists */
466 list_del(&cifs_file->flist);
467 list_del(&cifs_file->tlist);
Steve Frenchfae80442018-10-19 17:14:32 -0500468 atomic_dec(&tcon->num_local_opens);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000469
470 if (list_empty(&cifsi->openFileList)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500471 cifs_dbg(FYI, "closing last open instance for inode %p\n",
David Howells2b0143b2015-03-17 22:25:59 +0000472 d_inode(cifs_file->dentry));
Pavel Shilovsky25364132012-09-18 16:20:27 -0700473 /*
474 * In strict cache mode we need invalidate mapping on the last
475 * close because it may cause a error when we open this file
476 * again and get at least level II oplock.
477 */
Pavel Shilovsky4f8ba8a2010-11-21 22:36:12 +0300478 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
Jeff Laytonaff8d5c2014-04-30 09:31:45 -0400479 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
Pavel Shilovskyc6723622010-11-03 10:58:57 +0300480 cifs_set_oplock_level(cifsi, 0);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000481 }
Steve French3afca262016-09-22 18:58:16 -0500482
Pavel Shilovsky1a67c412019-10-23 15:37:19 -0700483 spin_unlock(&cifsi->open_file_lock);
Steve French3afca262016-09-22 18:58:16 -0500484 spin_unlock(&tcon->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000485
Aurelien Aptelb98749c2019-03-29 10:49:12 +0100486 oplock_break_cancelled = wait_oplock_handler ?
487 cancel_work_sync(&cifs_file->oplock_break) : false;
Jeff Laytonad635942011-07-26 12:20:17 -0400488
Steve Frenchcdff08e2010-10-21 22:46:14 +0000489 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700490 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400491 unsigned int xid;
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700492
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400493 xid = get_xid();
Steve French43f8a6a2019-12-02 21:46:54 -0600494 if (server->ops->close_getattr)
495 server->ops->close_getattr(xid, tcon, cifs_file);
496 else if (server->ops->close)
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +0400497 server->ops->close(xid, tcon, &cifs_file->fid);
498 _free_xid(xid);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000499 }
500
Sachin Prabhuca7df8e2015-01-15 12:22:04 +0000501 if (oplock_break_cancelled)
502 cifs_done_oplock_break(cifsi);
503
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700504 cifs_del_pending_open(&open);
505
Ronnie Sahlberg32546a92019-11-03 13:06:37 +1000506 if (offload)
507 queue_work(fileinfo_put_wq, &cifs_file->put);
508 else
509 cifsFileInfo_put_final(cifs_file);
Jeff Laytonb33879a2010-10-15 15:34:04 -0400510}
511
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512int cifs_open(struct inode *inode, struct file *file)
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700513
Linus Torvalds1da177e2005-04-16 15:20:36 -0700514{
515 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400516 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400517 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700519 struct TCP_Server_Info *server;
Steve French96daf2b2011-05-27 04:34:02 +0000520 struct cifs_tcon *tcon;
Jeff Layton7ffec372010-09-29 19:51:11 -0400521 struct tcon_link *tlink;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700522 struct cifsFileInfo *cfile = NULL;
Al Virof6a9bc32021-03-05 17:36:04 -0500523 void *page;
524 const char *full_path;
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300525 bool posix_open_ok = false;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700526 struct cifs_fid fid;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700527 struct cifs_pending_open open;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400529 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530
531 cifs_sb = CIFS_SB(inode->i_sb);
Steve French087f7572021-04-29 00:18:43 -0500532 if (unlikely(cifs_forced_shutdown(cifs_sb))) {
533 free_xid(xid);
534 return -EIO;
535 }
536
Jeff Layton7ffec372010-09-29 19:51:11 -0400537 tlink = cifs_sb_tlink(cifs_sb);
538 if (IS_ERR(tlink)) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400539 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400540 return PTR_ERR(tlink);
541 }
542 tcon = tlink_tcon(tlink);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700543 server = tcon->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544
Al Virof6a9bc32021-03-05 17:36:04 -0500545 page = alloc_dentry_path();
546 full_path = build_path_from_dentry(file_dentry(file), page);
547 if (IS_ERR(full_path)) {
548 rc = PTR_ERR(full_path);
Jeff Layton232341b2010-08-05 13:58:38 -0400549 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 }
551
Joe Perchesf96637b2013-05-04 22:12:25 -0500552 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +0000553 inode, file->f_flags, full_path);
Steve French276a74a2009-03-03 18:00:34 +0000554
Namjae Jeon787aded2014-08-22 14:22:51 +0900555 if (file->f_flags & O_DIRECT &&
556 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
557 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
558 file->f_op = &cifs_file_direct_nobrl_ops;
559 else
560 file->f_op = &cifs_file_direct_ops;
561 }
562
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500563 /* Get the cached handle as SMB2 close is deferred */
564 rc = cifs_get_readable_path(tcon, full_path, &cfile);
565 if (rc == 0) {
566 if (file->f_flags == cfile->f_flags) {
567 file->private_data = cfile;
Rohith Surabattula860b69a2021-05-05 10:56:47 +0000568 spin_lock(&CIFS_I(inode)->deferred_lock);
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500569 cifs_del_deferred_close(cfile);
570 spin_unlock(&CIFS_I(inode)->deferred_lock);
David Howells70431bf2020-11-17 15:56:59 +0000571 goto use_cache;
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500572 } else {
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500573 _cifsFileInfo_put(cfile, true, false);
574 }
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500575 }
576
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700577 if (server->oplocks)
Steve French276a74a2009-03-03 18:00:34 +0000578 oplock = REQ_OPLOCK;
579 else
580 oplock = 0;
581
Steve French64cc2c62009-03-04 19:54:08 +0000582 if (!tcon->broken_posix_open && tcon->unix_ext &&
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400583 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
584 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Steve French276a74a2009-03-03 18:00:34 +0000585 /* can not refresh inode info since size could be stale */
Jeff Layton2422f672010-06-16 13:40:16 -0400586 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
Ronnie Sahlberg8401e932020-12-12 13:40:50 -0600587 cifs_sb->ctx->file_mode /* ignored */,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700588 file->f_flags, &oplock, &fid.netfid, xid);
Steve French276a74a2009-03-03 18:00:34 +0000589 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500590 cifs_dbg(FYI, "posix open succeeded\n");
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300591 posix_open_ok = true;
Steve French64cc2c62009-03-04 19:54:08 +0000592 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
593 if (tcon->ses->serverNOS)
Joe Perchesf96637b2013-05-04 22:12:25 -0500594 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
Steve Frenchb438fcf2021-02-20 19:24:11 -0600595 tcon->ses->ip_addr,
Joe Perchesf96637b2013-05-04 22:12:25 -0500596 tcon->ses->serverNOS);
Steve French64cc2c62009-03-04 19:54:08 +0000597 tcon->broken_posix_open = true;
Steve French276a74a2009-03-03 18:00:34 +0000598 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
599 (rc != -EOPNOTSUPP)) /* path not found or net err */
600 goto out;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700601 /*
602 * Else fallthrough to retry open the old way on network i/o
603 * or DFS errors.
604 */
Steve French276a74a2009-03-03 18:00:34 +0000605 }
606
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700607 if (server->ops->get_lease_key)
608 server->ops->get_lease_key(inode, &fid);
609
610 cifs_add_pending_open(&fid, tlink, &open);
611
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300612 if (!posix_open_ok) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700613 if (server->ops->get_lease_key)
614 server->ops->get_lease_key(inode, &fid);
615
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300616 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700617 file->f_flags, &oplock, &fid, xid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700618 if (rc) {
619 cifs_del_pending_open(&open);
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300620 goto out;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700621 }
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300622 }
Jeff Layton47c78b72010-06-16 13:40:17 -0400623
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700624 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
625 if (cfile == NULL) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700626 if (server->ops->close)
627 server->ops->close(xid, tcon, &fid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700628 cifs_del_pending_open(&open);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629 rc = -ENOMEM;
630 goto out;
631 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300633 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700634 /*
635 * Time to set mode which we can not set earlier due to
636 * problems creating new read-only files.
637 */
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300638 struct cifs_unix_set_info_args args = {
639 .mode = inode->i_mode,
Eric W. Biederman49418b22013-02-06 00:57:56 -0800640 .uid = INVALID_UID, /* no change */
641 .gid = INVALID_GID, /* no change */
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300642 .ctime = NO_CHANGE_64,
643 .atime = NO_CHANGE_64,
644 .mtime = NO_CHANGE_64,
645 .device = 0,
646 };
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700647 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
648 cfile->pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 }
650
David Howells70431bf2020-11-17 15:56:59 +0000651use_cache:
652 fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
653 file->f_mode & FMODE_WRITE);
654 if (file->f_flags & O_DIRECT &&
655 (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
656 file->f_flags & O_APPEND))
657 cifs_invalidate_cache(file_inode(file),
658 FSCACHE_INVAL_DIO_WRITE);
659
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660out:
Al Virof6a9bc32021-03-05 17:36:04 -0500661 free_dentry_path(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400662 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400663 cifs_put_tlink(tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664 return rc;
665}
666
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400667static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
668
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700669/*
670 * Try to reacquire byte range locks that were released when session
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400671 * to server was lost.
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700672 */
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400673static int
674cifs_relock_file(struct cifsFileInfo *cfile)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675{
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400676 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +0000677 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400678 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700679 int rc = 0;
680
Rabin Vincent560d3882017-05-03 17:17:21 +0200681 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400682 if (cinode->can_cache_brlcks) {
Pavel Shilovsky689c3db2013-07-11 11:17:45 +0400683 /* can cache locks - no need to relock */
684 up_read(&cinode->lock_sem);
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400685 return rc;
686 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400688 if (cap_unix(tcon->ses) &&
689 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
690 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
691 rc = cifs_push_posix_locks(cfile);
692 else
693 rc = tcon->ses->server->ops->push_mand_locks(cfile);
694
Pavel Shilovsky689c3db2013-07-11 11:17:45 +0400695 up_read(&cinode->lock_sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696 return rc;
697}
698
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700699static int
700cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701{
702 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400703 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400704 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +0000706 struct cifs_tcon *tcon;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700707 struct TCP_Server_Info *server;
708 struct cifsInodeInfo *cinode;
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000709 struct inode *inode;
Al Virof6a9bc32021-03-05 17:36:04 -0500710 void *page;
711 const char *full_path;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700712 int desired_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 int disposition = FILE_OPEN;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500714 int create_options = CREATE_NOT_DIR;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400715 struct cifs_open_parms oparms;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400717 xid = get_xid();
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700718 mutex_lock(&cfile->fh_mutex);
719 if (!cfile->invalidHandle) {
720 mutex_unlock(&cfile->fh_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400721 free_xid(xid);
Al Virof6a9bc32021-03-05 17:36:04 -0500722 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723 }
724
David Howells2b0143b2015-03-17 22:25:59 +0000725 inode = d_inode(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726 cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700727 tcon = tlink_tcon(cfile->tlink);
728 server = tcon->ses->server;
Steve French3a9f4622007-04-04 17:10:24 +0000729
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700730 /*
731 * Can not grab rename sem here because various ops, including those
732 * that already have the rename sem can end up causing writepage to get
733 * called and if the server was down that means we end up here, and we
734 * can never tell if the caller already has the rename_sem.
735 */
Al Virof6a9bc32021-03-05 17:36:04 -0500736 page = alloc_dentry_path();
737 full_path = build_path_from_dentry(cfile->dentry, page);
738 if (IS_ERR(full_path)) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700739 mutex_unlock(&cfile->fh_mutex);
Al Virof6a9bc32021-03-05 17:36:04 -0500740 free_dentry_path(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400741 free_xid(xid);
Al Virof6a9bc32021-03-05 17:36:04 -0500742 return PTR_ERR(full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743 }
744
Joe Perchesf96637b2013-05-04 22:12:25 -0500745 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
746 inode, cfile->f_flags, full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747
Pavel Shilovsky10b9b982012-03-20 12:55:09 +0300748 if (tcon->ses->server->oplocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749 oplock = REQ_OPLOCK;
750 else
Steve French4b18f2a2008-04-29 00:06:05 +0000751 oplock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400753 if (tcon->unix_ext && cap_unix(tcon->ses) &&
Steve French7fc8f4e2009-02-23 20:43:11 +0000754 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400755 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Jeff Layton608712f2010-10-15 15:33:56 -0400756 /*
757 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
758 * original open. Must mask them off for a reopen.
759 */
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700760 unsigned int oflags = cfile->f_flags &
Jeff Layton15886172010-10-15 15:33:59 -0400761 ~(O_CREAT | O_EXCL | O_TRUNC);
Jeff Layton608712f2010-10-15 15:33:56 -0400762
Jeff Layton2422f672010-06-16 13:40:16 -0400763 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
Ronnie Sahlberg8401e932020-12-12 13:40:50 -0600764 cifs_sb->ctx->file_mode /* ignored */,
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400765 oflags, &oplock, &cfile->fid.netfid, xid);
Steve French7fc8f4e2009-02-23 20:43:11 +0000766 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500767 cifs_dbg(FYI, "posix reopen succeeded\n");
Andi Shytife090e42013-07-29 20:04:35 +0200768 oparms.reconnect = true;
Steve French7fc8f4e2009-02-23 20:43:11 +0000769 goto reopen_success;
770 }
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700771 /*
772 * fallthrough to retry open the old way on errors, especially
773 * in the reconnect path it is important to retry hard
774 */
Steve French7fc8f4e2009-02-23 20:43:11 +0000775 }
776
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700777 desired_access = cifs_convert_flags(cfile->f_flags);
Steve French7fc8f4e2009-02-23 20:43:11 +0000778
Pavel Shilovsky44805b02019-11-12 17:16:35 -0800779 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
780 if (cfile->f_flags & O_SYNC)
781 create_options |= CREATE_WRITE_THROUGH;
782
783 if (cfile->f_flags & O_DIRECT)
784 create_options |= CREATE_NO_BUFFER;
785
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700786 if (server->ops->get_lease_key)
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400787 server->ops->get_lease_key(inode, &cfile->fid);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700788
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400789 oparms.tcon = tcon;
790 oparms.cifs_sb = cifs_sb;
791 oparms.desired_access = desired_access;
Amir Goldstein0f060932020-02-03 21:46:43 +0200792 oparms.create_options = cifs_create_options(cifs_sb, create_options);
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400793 oparms.disposition = disposition;
794 oparms.path = full_path;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400795 oparms.fid = &cfile->fid;
796 oparms.reconnect = true;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400797
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700798 /*
799 * Can not refresh inode by passing in file_info buf to be returned by
Pavel Shilovskyd81b8a42014-01-16 15:53:36 +0400800 * ops->open and then calling get_inode_info with returned buf since
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700801 * file might have write behind data that needs to be flushed and server
802 * version of file size can be stale. If we knew for sure that inode was
803 * not dirty locally we could do this.
804 */
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400805 rc = server->ops->open(xid, &oparms, &oplock, NULL);
Pavel Shilovskyb33fcf12013-07-11 10:58:30 +0400806 if (rc == -ENOENT && oparms.reconnect == false) {
807 /* durable handle timeout is expired - open the file again */
808 rc = server->ops->open(xid, &oparms, &oplock, NULL);
809 /* indicate that we need to relock the file */
810 oparms.reconnect = true;
811 }
812
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 if (rc) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700814 mutex_unlock(&cfile->fh_mutex);
Joe Perchesf96637b2013-05-04 22:12:25 -0500815 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
816 cifs_dbg(FYI, "oplock: %d\n", oplock);
Jeff Layton15886172010-10-15 15:33:59 -0400817 goto reopen_error_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700818 }
Jeff Layton15886172010-10-15 15:33:59 -0400819
820reopen_success:
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700821 cfile->invalidHandle = false;
822 mutex_unlock(&cfile->fh_mutex);
823 cinode = CIFS_I(inode);
Jeff Layton15886172010-10-15 15:33:59 -0400824
825 if (can_flush) {
826 rc = filemap_write_and_wait(inode->i_mapping);
Pavel Shilovsky9a663962019-01-08 11:15:28 -0800827 if (!is_interrupt_error(rc))
828 mapping_set_error(inode->i_mapping, rc);
Jeff Layton15886172010-10-15 15:33:59 -0400829
Steve Frenchd3138522020-06-11 22:43:01 -0500830 if (tcon->posix_extensions)
831 rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
832 else if (tcon->unix_ext)
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700833 rc = cifs_get_inode_info_unix(&inode, full_path,
834 inode->i_sb, xid);
Jeff Layton15886172010-10-15 15:33:59 -0400835 else
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700836 rc = cifs_get_inode_info(&inode, full_path, NULL,
837 inode->i_sb, xid, NULL);
838 }
839 /*
840 * Else we are writing out data to server already and could deadlock if
841 * we tried to flush data, and since we do not know if we have data that
842 * would invalidate the current end of file on the server we can not go
843 * to the server to get the new inode info.
844 */
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300845
Pavel Shilovskyde740252016-10-11 15:34:07 -0700846 /*
847 * If the server returned a read oplock and we have mandatory brlocks,
848 * set oplock level to None.
849 */
850 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
851 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
852 oplock = 0;
853 }
854
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400855 server->ops->set_fid(cfile, &cfile->fid, oplock);
856 if (oparms.reconnect)
857 cifs_relock_file(cfile);
Jeff Layton15886172010-10-15 15:33:59 -0400858
859reopen_error_exit:
Al Virof6a9bc32021-03-05 17:36:04 -0500860 free_dentry_path(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400861 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862 return rc;
863}
864
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500865void smb2_deferred_work_close(struct work_struct *work)
866{
867 struct cifsFileInfo *cfile = container_of(work,
868 struct cifsFileInfo, deferred.work);
869
870 spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
871 cifs_del_deferred_close(cfile);
Rohith Surabattula860b69a2021-05-05 10:56:47 +0000872 cfile->deferred_close_scheduled = false;
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500873 spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
874 _cifsFileInfo_put(cfile, true, false);
875}
876
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877int cifs_close(struct inode *inode, struct file *file)
878{
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500879 struct cifsFileInfo *cfile;
880 struct cifsInodeInfo *cinode = CIFS_I(inode);
881 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
882 struct cifs_deferred_close *dclose;
883
David Howells70431bf2020-11-17 15:56:59 +0000884 cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
885
Jeff Layton77970692011-04-05 16:23:47 -0700886 if (file->private_data != NULL) {
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500887 cfile = file->private_data;
Jeff Layton77970692011-04-05 16:23:47 -0700888 file->private_data = NULL;
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500889 dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
890 if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
Rohith Surabattula0ab95c22021-05-17 11:28:34 +0000891 cinode->lease_granted &&
Rohith Surabattula35866f32021-09-17 16:50:40 -0500892 !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500893 dclose) {
Steve French4f222622021-09-23 12:42:35 -0500894 if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500895 inode->i_ctime = inode->i_mtime = current_time(inode);
Shyam Prasad N18d04062021-08-10 10:22:28 +0000896 }
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500897 spin_lock(&cinode->deferred_lock);
898 cifs_add_deferred_close(cfile, dclose);
Rohith Surabattula860b69a2021-05-05 10:56:47 +0000899 if (cfile->deferred_close_scheduled &&
900 delayed_work_pending(&cfile->deferred)) {
Rohith Surabattula9687c852021-05-20 16:45:01 +0000901 /*
902 * If there is no pending work, mod_delayed_work queues new work.
903 * So, Increase the ref count to avoid use-after-free.
904 */
905 if (!mod_delayed_work(deferredclose_wq,
906 &cfile->deferred, cifs_sb->ctx->acregmax))
907 cifsFileInfo_get(cfile);
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500908 } else {
909 /* Deferred close for files */
910 queue_delayed_work(deferredclose_wq,
911 &cfile->deferred, cifs_sb->ctx->acregmax);
Rohith Surabattula860b69a2021-05-05 10:56:47 +0000912 cfile->deferred_close_scheduled = true;
Rohith Surabattulac3f207a2021-04-13 00:26:42 -0500913 spin_unlock(&cinode->deferred_lock);
914 return 0;
915 }
916 spin_unlock(&cinode->deferred_lock);
917 _cifsFileInfo_put(cfile, true, false);
918 } else {
919 _cifsFileInfo_put(cfile, true, false);
920 kfree(dclose);
921 }
Jeff Layton77970692011-04-05 16:23:47 -0700922 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923
Steve Frenchcdff08e2010-10-21 22:46:14 +0000924 /* return code from the ->release op is always ignored */
925 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926}
927
Steve French52ace1e2016-09-22 19:23:56 -0500928void
929cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
930{
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700931 struct cifsFileInfo *open_file;
Steve French52ace1e2016-09-22 19:23:56 -0500932 struct list_head *tmp;
933 struct list_head *tmp1;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700934 struct list_head tmp_list;
935
Pavel Shilovsky96a988f2016-11-29 11:31:23 -0800936 if (!tcon->use_persistent || !tcon->need_reopen_files)
937 return;
938
939 tcon->need_reopen_files = false;
940
Joe Perchesa0a30362020-04-14 22:42:53 -0700941 cifs_dbg(FYI, "Reopen persistent handles\n");
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700942 INIT_LIST_HEAD(&tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500943
944 /* list all files open on tree connection, reopen resilient handles */
945 spin_lock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700946 list_for_each(tmp, &tcon->openFileList) {
Steve French52ace1e2016-09-22 19:23:56 -0500947 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700948 if (!open_file->invalidHandle)
949 continue;
950 cifsFileInfo_get(open_file);
951 list_add_tail(&open_file->rlist, &tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500952 }
953 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700954
955 list_for_each_safe(tmp, tmp1, &tmp_list) {
956 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
Pavel Shilovsky96a988f2016-11-29 11:31:23 -0800957 if (cifs_reopen_file(open_file, false /* do not flush */))
958 tcon->need_reopen_files = true;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700959 list_del_init(&open_file->rlist);
960 cifsFileInfo_put(open_file);
961 }
Steve French52ace1e2016-09-22 19:23:56 -0500962}
963
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964int cifs_closedir(struct inode *inode, struct file *file)
965{
966 int rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400967 unsigned int xid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700968 struct cifsFileInfo *cfile = file->private_data;
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700969 struct cifs_tcon *tcon;
970 struct TCP_Server_Info *server;
971 char *buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972
Joe Perchesf96637b2013-05-04 22:12:25 -0500973 cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700975 if (cfile == NULL)
976 return rc;
977
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400978 xid = get_xid();
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700979 tcon = tlink_tcon(cfile->tlink);
980 server = tcon->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981
Joe Perchesf96637b2013-05-04 22:12:25 -0500982 cifs_dbg(FYI, "Freeing private data in close dir\n");
Steve French3afca262016-09-22 18:58:16 -0500983 spin_lock(&cfile->file_info_lock);
Pavel Shilovsky52755802014-08-18 20:49:57 +0400984 if (server->ops->dir_needs_close(cfile)) {
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700985 cfile->invalidHandle = true;
Steve French3afca262016-09-22 18:58:16 -0500986 spin_unlock(&cfile->file_info_lock);
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700987 if (server->ops->close_dir)
988 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
989 else
990 rc = -ENOSYS;
Joe Perchesf96637b2013-05-04 22:12:25 -0500991 cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700992 /* not much we can do if it fails anyway, ignore rc */
993 rc = 0;
994 } else
Steve French3afca262016-09-22 18:58:16 -0500995 spin_unlock(&cfile->file_info_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700997 buf = cfile->srch_inf.ntwrk_buf_start;
998 if (buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500999 cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -07001000 cfile->srch_inf.ntwrk_buf_start = NULL;
1001 if (cfile->srch_inf.smallBuf)
1002 cifs_small_buf_release(buf);
1003 else
1004 cifs_buf_release(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005 }
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -07001006
1007 cifs_put_tlink(cfile->tlink);
1008 kfree(file->private_data);
1009 file->private_data = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010 /* BB can we lock the filestruct while this is going on? */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001011 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 return rc;
1013}
1014
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001015static struct cifsLockInfo *
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001016cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001017{
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001018 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001019 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001020 if (!lock)
1021 return lock;
1022 lock->offset = offset;
1023 lock->length = length;
1024 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001025 lock->pid = current->tgid;
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001026 lock->flags = flags;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001027 INIT_LIST_HEAD(&lock->blist);
1028 init_waitqueue_head(&lock->block_q);
1029 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001030}
1031
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001032void
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001033cifs_del_lock_waiters(struct cifsLockInfo *lock)
1034{
1035 struct cifsLockInfo *li, *tmp;
1036 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1037 list_del_init(&li->blist);
1038 wake_up(&li->block_q);
1039 }
1040}
1041
Pavel Shilovsky081c0412012-11-27 18:38:53 +04001042#define CIFS_LOCK_OP 0
1043#define CIFS_READ_OP 1
1044#define CIFS_WRITE_OP 2
1045
1046/* @rw_check : 0 - no op, 1 - read, 2 - write */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001047static bool
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001048cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001049 __u64 length, __u8 type, __u16 flags,
1050 struct cifsFileInfo *cfile,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04001051 struct cifsLockInfo **conf_lock, int rw_check)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001052{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001053 struct cifsLockInfo *li;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001054 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001055 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001056
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001057 list_for_each_entry(li, &fdlocks->locks, llist) {
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001058 if (offset + length <= li->offset ||
1059 offset >= li->offset + li->length)
1060 continue;
Pavel Shilovsky081c0412012-11-27 18:38:53 +04001061 if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1062 server->ops->compare_fids(cfile, cur_cfile)) {
1063 /* shared lock prevents write op through the same fid */
1064 if (!(li->type & server->vals->shared_lock_type) ||
1065 rw_check != CIFS_WRITE_OP)
1066 continue;
1067 }
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001068 if ((type & server->vals->shared_lock_type) &&
1069 ((server->ops->compare_fids(cfile, cur_cfile) &&
1070 current->tgid == li->pid) || type == li->type))
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001071 continue;
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001072 if (rw_check == CIFS_LOCK_OP &&
1073 (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1074 server->ops->compare_fids(cfile, cur_cfile))
1075 continue;
Pavel Shilovsky579f9052012-09-19 06:22:44 -07001076 if (conf_lock)
1077 *conf_lock = li;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001078 return true;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001079 }
1080 return false;
1081}
1082
Pavel Shilovsky579f9052012-09-19 06:22:44 -07001083bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001084cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001085 __u8 type, __u16 flags,
1086 struct cifsLockInfo **conf_lock, int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001087{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001088 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001089 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +00001090 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001091
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001092 list_for_each_entry(cur, &cinode->llist, llist) {
1093 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001094 flags, cfile, conf_lock,
1095 rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001096 if (rc)
1097 break;
1098 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001099
1100 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001101}
1102
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001103/*
1104 * Check if there is another lock that prevents us to set the lock (mandatory
1105 * style). If such a lock exists, update the flock structure with its
1106 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1107 * or leave it the same if we can't. Returns 0 if we don't need to request to
1108 * the server or 1 otherwise.
1109 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001110static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001111cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1112 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001113{
1114 int rc = 0;
1115 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +00001116 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001117 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001118 bool exist;
1119
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001120 down_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001121
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001122 exist = cifs_find_lock_conflict(cfile, offset, length, type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001123 flock->fl_flags, &conf_lock,
1124 CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001125 if (exist) {
1126 flock->fl_start = conf_lock->offset;
1127 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1128 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001129 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001130 flock->fl_type = F_RDLCK;
1131 else
1132 flock->fl_type = F_WRLCK;
1133 } else if (!cinode->can_cache_brlcks)
1134 rc = 1;
1135 else
1136 flock->fl_type = F_UNLCK;
1137
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001138 up_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001139 return rc;
1140}
1141
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001142static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001143cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001144{
David Howells2b0143b2015-03-17 22:25:59 +00001145 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Dave Wysochanskid46b0da2019-10-23 05:02:33 -04001146 cifs_down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001147 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001148 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001149}
1150
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001151/*
1152 * Set the byte-range lock (mandatory style). Returns:
1153 * 1) 0, if we set the lock and don't need to request to the server;
1154 * 2) 1, if no locks prevent us but we need to request to the server;
Colin Ian King413d6102018-10-26 19:07:21 +01001155 * 3) -EACCES, if there is a lock that prevents us and wait is false.
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001156 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001157static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001158cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001159 bool wait)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001160{
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001161 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +00001162 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001163 bool exist;
1164 int rc = 0;
1165
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001166try_again:
1167 exist = false;
Dave Wysochanskid46b0da2019-10-23 05:02:33 -04001168 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001169
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001170 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001171 lock->type, lock->flags, &conf_lock,
1172 CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001173 if (!exist && cinode->can_cache_brlcks) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001174 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001175 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001176 return rc;
1177 }
1178
1179 if (!exist)
1180 rc = 1;
1181 else if (!wait)
1182 rc = -EACCES;
1183 else {
1184 list_add_tail(&lock->blist, &conf_lock->blist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001185 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001186 rc = wait_event_interruptible(lock->block_q,
1187 (lock->blist.prev == &lock->blist) &&
1188 (lock->blist.next == &lock->blist));
1189 if (!rc)
1190 goto try_again;
Dave Wysochanskid46b0da2019-10-23 05:02:33 -04001191 cifs_down_write(&cinode->lock_sem);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001192 list_del_init(&lock->blist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001193 }
1194
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001195 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001196 return rc;
1197}
1198
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001199/*
1200 * Check if there is another lock that prevents us to set the lock (posix
1201 * style). If such a lock exists, update the flock structure with its
1202 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1203 * or leave it the same if we can't. Returns 0 if we don't need to request to
1204 * the server or 1 otherwise.
1205 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001206static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001207cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1208{
1209 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001210 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001211 unsigned char saved_type = flock->fl_type;
1212
Pavel Shilovsky50792762011-10-29 17:17:57 +04001213 if ((flock->fl_flags & FL_POSIX) == 0)
1214 return 1;
1215
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001216 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001217 posix_test_lock(file, flock);
1218
1219 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1220 flock->fl_type = saved_type;
1221 rc = 1;
1222 }
1223
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001224 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001225 return rc;
1226}
1227
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001228/*
1229 * Set the byte-range lock (posix style). Returns:
yangerkun2e98c0182020-07-02 15:25:26 +08001230 * 1) <0, if the error occurs while setting the lock;
1231 * 2) 0, if we set the lock and don't need to request to the server;
1232 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1233 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001234 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001235static int
1236cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1237{
Al Viro496ad9a2013-01-23 17:07:38 -05001238 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
yangerkun2e98c0182020-07-02 15:25:26 +08001239 int rc = FILE_LOCK_DEFERRED + 1;
Pavel Shilovsky50792762011-10-29 17:17:57 +04001240
1241 if ((flock->fl_flags & FL_POSIX) == 0)
1242 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001243
Dave Wysochanskid46b0da2019-10-23 05:02:33 -04001244 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001245 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001246 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001247 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001248 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001249
1250 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001251 up_write(&cinode->lock_sem);
Steve French9ebb3892012-04-01 13:52:54 -05001252 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001253}
1254
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001255int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001256cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001257{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001258 unsigned int xid;
1259 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001260 struct cifsLockInfo *li, *tmp;
1261 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001262 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001263 LOCKING_ANDX_RANGE *buf, *cur;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001264 static const int types[] = {
1265 LOCKING_ANDX_LARGE_FILES,
1266 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1267 };
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001268 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001269
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001270 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001271 tcon = tlink_tcon(cfile->tlink);
1272
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001273 /*
1274 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001275 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001276 */
1277 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001278 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001279 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001280 return -EINVAL;
1281 }
1282
Ross Lagerwall92a81092019-01-08 18:30:56 +00001283 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1284 PAGE_SIZE);
1285 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1286 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001287 max_num = (max_buf - sizeof(struct smb_hdr)) /
1288 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001289 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001290 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001291 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001292 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001293 }
1294
1295 for (i = 0; i < 2; i++) {
1296 cur = buf;
1297 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001298 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001299 if (li->type != types[i])
1300 continue;
1301 cur->Pid = cpu_to_le16(li->pid);
1302 cur->LengthLow = cpu_to_le32((u32)li->length);
1303 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1304 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1305 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1306 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001307 stored_rc = cifs_lockv(xid, tcon,
1308 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001309 (__u8)li->type, 0, num,
1310 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001311 if (stored_rc)
1312 rc = stored_rc;
1313 cur = buf;
1314 num = 0;
1315 } else
1316 cur++;
1317 }
1318
1319 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001320 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001321 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001322 if (stored_rc)
1323 rc = stored_rc;
1324 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001325 }
1326
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001327 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001328 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001329 return rc;
1330}
1331
Jeff Layton3d224622016-05-24 06:27:44 -04001332static __u32
1333hash_lockowner(fl_owner_t owner)
1334{
1335 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1336}
1337
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001338struct lock_to_push {
1339 struct list_head llist;
1340 __u64 offset;
1341 __u64 length;
1342 __u32 pid;
1343 __u16 netfid;
1344 __u8 type;
1345};
1346
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001347static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001348cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001349{
David Howells2b0143b2015-03-17 22:25:59 +00001350 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001351 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001352 struct file_lock *flock;
1353 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001354 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001355 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001356 struct list_head locks_to_send, *el;
1357 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001358 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001359
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001360 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001361
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001362 if (!flctx)
1363 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001364
Jeff Laytone084c1b2015-02-16 14:32:03 -05001365 spin_lock(&flctx->flc_lock);
1366 list_for_each(el, &flctx->flc_posix) {
1367 count++;
1368 }
1369 spin_unlock(&flctx->flc_lock);
1370
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001371 INIT_LIST_HEAD(&locks_to_send);
1372
1373 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001374 * Allocating count locks is enough because no FL_POSIX locks can be
1375 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001376 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001377 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001378 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001379 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1380 if (!lck) {
1381 rc = -ENOMEM;
1382 goto err_out;
1383 }
1384 list_add_tail(&lck->llist, &locks_to_send);
1385 }
1386
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001387 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001388 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001389 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001390 if (el == &locks_to_send) {
1391 /*
1392 * The list ended. We don't have enough allocated
1393 * structures - something is really wrong.
1394 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001395 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001396 break;
1397 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001398 length = 1 + flock->fl_end - flock->fl_start;
1399 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1400 type = CIFS_RDLCK;
1401 else
1402 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001403 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001404 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001405 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001406 lck->length = length;
1407 lck->type = type;
1408 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001409 }
Jeff Layton6109c852015-01-16 15:05:57 -05001410 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001411
1412 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001413 int stored_rc;
1414
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001415 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001416 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001417 lck->type, 0);
1418 if (stored_rc)
1419 rc = stored_rc;
1420 list_del(&lck->llist);
1421 kfree(lck);
1422 }
1423
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001424out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001425 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001426 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001427err_out:
1428 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1429 list_del(&lck->llist);
1430 kfree(lck);
1431 }
1432 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001433}
1434
1435static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001436cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001437{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001438 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001439 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001440 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001441 int rc = 0;
1442
1443 /* we are going to update can_cache_brlcks here - need a write access */
Dave Wysochanskid46b0da2019-10-23 05:02:33 -04001444 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001445 if (!cinode->can_cache_brlcks) {
1446 up_write(&cinode->lock_sem);
1447 return rc;
1448 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001449
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001450 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001451 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1452 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001453 rc = cifs_push_posix_locks(cfile);
1454 else
1455 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001456
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001457 cinode->can_cache_brlcks = false;
1458 up_write(&cinode->lock_sem);
1459 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001460}
1461
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001462static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001463cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001464 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001466 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001467 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001468 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001469 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001470 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001471 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001472 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001474 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001475 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001476 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001477 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001478 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001479 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001480 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001481 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001483 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001484 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001485 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001486 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001487 *lock = 1;
1488 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001489 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001490 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001491 *unlock = 1;
1492 /* Check if unlock includes more than one lock range */
1493 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001494 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001495 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001496 *lock = 1;
1497 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001498 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001499 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001500 *lock = 1;
1501 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001502 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001503 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001504 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001506 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001507}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001509static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001510cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001511 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001512{
1513 int rc = 0;
1514 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001515 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1516 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001517 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001518 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001520 if (posix_lck) {
1521 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001522
1523 rc = cifs_posix_lock_test(file, flock);
1524 if (!rc)
1525 return rc;
1526
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001527 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001528 posix_lock_type = CIFS_RDLCK;
1529 else
1530 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001531 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1532 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001533 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001534 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 return rc;
1536 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001537
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001538 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001539 if (!rc)
1540 return rc;
1541
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001542 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001543 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1544 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001545 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001546 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1547 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001548 flock->fl_type = F_UNLCK;
1549 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001550 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1551 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001552 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001553 }
1554
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001555 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001556 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001557 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001558 }
1559
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001560 type &= ~server->vals->exclusive_lock_type;
1561
1562 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1563 type | server->vals->shared_lock_type,
1564 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001565 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001566 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1567 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001568 flock->fl_type = F_RDLCK;
1569 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001570 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1571 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001572 } else
1573 flock->fl_type = F_WRLCK;
1574
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001575 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001576}
1577
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001578void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001579cifs_move_llist(struct list_head *source, struct list_head *dest)
1580{
1581 struct list_head *li, *tmp;
1582 list_for_each_safe(li, tmp, source)
1583 list_move(li, dest);
1584}
1585
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001586void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001587cifs_free_llist(struct list_head *llist)
1588{
1589 struct cifsLockInfo *li, *tmp;
1590 list_for_each_entry_safe(li, tmp, llist, llist) {
1591 cifs_del_lock_waiters(li);
1592 list_del(&li->llist);
1593 kfree(li);
1594 }
1595}
1596
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001597int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001598cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1599 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001600{
1601 int rc = 0, stored_rc;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001602 static const int types[] = {
1603 LOCKING_ANDX_LARGE_FILES,
1604 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1605 };
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001606 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001607 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001608 LOCKING_ANDX_RANGE *buf, *cur;
1609 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001610 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001611 struct cifsLockInfo *li, *tmp;
1612 __u64 length = 1 + flock->fl_end - flock->fl_start;
1613 struct list_head tmp_llist;
1614
1615 INIT_LIST_HEAD(&tmp_llist);
1616
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001617 /*
1618 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001619 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001620 */
1621 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001622 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001623 return -EINVAL;
1624
Ross Lagerwall92a81092019-01-08 18:30:56 +00001625 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1626 PAGE_SIZE);
1627 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1628 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001629 max_num = (max_buf - sizeof(struct smb_hdr)) /
1630 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001631 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001632 if (!buf)
1633 return -ENOMEM;
1634
Dave Wysochanskid46b0da2019-10-23 05:02:33 -04001635 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001636 for (i = 0; i < 2; i++) {
1637 cur = buf;
1638 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001639 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001640 if (flock->fl_start > li->offset ||
1641 (flock->fl_start + length) <
1642 (li->offset + li->length))
1643 continue;
1644 if (current->tgid != li->pid)
1645 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001646 if (types[i] != li->type)
1647 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001648 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001649 /*
1650 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001651 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001652 */
1653 list_del(&li->llist);
1654 cifs_del_lock_waiters(li);
1655 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001656 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001657 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001658 cur->Pid = cpu_to_le16(li->pid);
1659 cur->LengthLow = cpu_to_le32((u32)li->length);
1660 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1661 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1662 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1663 /*
1664 * We need to save a lock here to let us add it again to
1665 * the file's list if the unlock range request fails on
1666 * the server.
1667 */
1668 list_move(&li->llist, &tmp_llist);
1669 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001670 stored_rc = cifs_lockv(xid, tcon,
1671 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001672 li->type, num, 0, buf);
1673 if (stored_rc) {
1674 /*
1675 * We failed on the unlock range
1676 * request - add all locks from the tmp
1677 * list to the head of the file's list.
1678 */
1679 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001680 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001681 rc = stored_rc;
1682 } else
1683 /*
1684 * The unlock range request succeed -
1685 * free the tmp list.
1686 */
1687 cifs_free_llist(&tmp_llist);
1688 cur = buf;
1689 num = 0;
1690 } else
1691 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001692 }
1693 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001694 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001695 types[i], num, 0, buf);
1696 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001697 cifs_move_llist(&tmp_llist,
1698 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001699 rc = stored_rc;
1700 } else
1701 cifs_free_llist(&tmp_llist);
1702 }
1703 }
1704
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001705 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001706 kfree(buf);
1707 return rc;
1708}
1709
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001710static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001711cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001712 bool wait_flag, bool posix_lck, int lock, int unlock,
1713 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001714{
1715 int rc = 0;
1716 __u64 length = 1 + flock->fl_end - flock->fl_start;
1717 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1718 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001719 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001720 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001721
1722 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001723 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001724
1725 rc = cifs_posix_lock_set(file, flock);
yangerkun2e98c0182020-07-02 15:25:26 +08001726 if (rc <= FILE_LOCK_DEFERRED)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001727 return rc;
1728
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001729 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001730 posix_lock_type = CIFS_RDLCK;
1731 else
1732 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001733
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001734 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001735 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001736
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001737 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001738 hash_lockowner(flock->fl_owner),
1739 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001740 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001741 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001742 }
1743
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001744 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001745 struct cifsLockInfo *lock;
1746
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001747 lock = cifs_lock_init(flock->fl_start, length, type,
1748 flock->fl_flags);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001749 if (!lock)
1750 return -ENOMEM;
1751
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001752 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001753 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001754 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001755 return rc;
1756 }
1757 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001758 goto out;
1759
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001760 /*
1761 * Windows 7 server can delay breaking lease from read to None
1762 * if we set a byte-range lock on a file - break it explicitly
1763 * before sending the lock to the server to be sure the next
1764 * read won't conflict with non-overlapted locks due to
1765 * pagereading.
1766 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001767 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1768 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001769 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001770 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1771 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001772 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001773 }
1774
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001775 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1776 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001777 if (rc) {
1778 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001779 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001780 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001781
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001782 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001783 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001784 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001785
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001786out:
Steve Frenchd0677992019-07-16 18:55:38 -05001787 if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001788 /*
1789 * If this is a request to remove all locks because we
1790 * are closing the file, it doesn't matter if the
1791 * unlocking failed as both cifs.ko and the SMB server
1792 * remove the lock on file close
1793 */
1794 if (rc) {
1795 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1796 if (!(flock->fl_flags & FL_CLOSE))
1797 return rc;
1798 }
Benjamin Coddington4f656362015-10-22 13:38:14 -04001799 rc = locks_lock_file_wait(file, flock);
Aurelien Aptelbc31d0c2019-03-14 18:44:16 +01001800 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001801 return rc;
1802}
1803
Steve Frenchd0677992019-07-16 18:55:38 -05001804int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
1805{
1806 int rc, xid;
1807 int lock = 0, unlock = 0;
1808 bool wait_flag = false;
1809 bool posix_lck = false;
1810 struct cifs_sb_info *cifs_sb;
1811 struct cifs_tcon *tcon;
Steve Frenchd0677992019-07-16 18:55:38 -05001812 struct cifsFileInfo *cfile;
Steve Frenchd0677992019-07-16 18:55:38 -05001813 __u32 type;
1814
1815 rc = -EACCES;
1816 xid = get_xid();
1817
1818 if (!(fl->fl_flags & FL_FLOCK))
1819 return -ENOLCK;
1820
1821 cfile = (struct cifsFileInfo *)file->private_data;
1822 tcon = tlink_tcon(cfile->tlink);
1823
1824 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
1825 tcon->ses->server);
1826 cifs_sb = CIFS_FILE_SB(file);
Steve Frenchd0677992019-07-16 18:55:38 -05001827
1828 if (cap_unix(tcon->ses) &&
1829 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1830 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1831 posix_lck = true;
1832
1833 if (!lock && !unlock) {
1834 /*
1835 * if no lock or unlock then nothing to do since we do not
1836 * know what it is
1837 */
1838 free_xid(xid);
1839 return -EOPNOTSUPP;
1840 }
1841
1842 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
1843 xid);
1844 free_xid(xid);
1845 return rc;
1846
1847
1848}
1849
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001850int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1851{
1852 int rc, xid;
1853 int lock = 0, unlock = 0;
1854 bool wait_flag = false;
1855 bool posix_lck = false;
1856 struct cifs_sb_info *cifs_sb;
1857 struct cifs_tcon *tcon;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001858 struct cifsFileInfo *cfile;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001859 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001860
1861 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001862 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001863
Joe Perchesf96637b2013-05-04 22:12:25 -05001864 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1865 cmd, flock->fl_flags, flock->fl_type,
1866 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001867
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001868 cfile = (struct cifsFileInfo *)file->private_data;
1869 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001870
1871 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1872 tcon->ses->server);
Al Viro7119e222014-10-22 00:25:12 -04001873 cifs_sb = CIFS_FILE_SB(file);
Rohith Surabattula35866f32021-09-17 16:50:40 -05001874 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001875
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001876 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001877 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1878 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1879 posix_lck = true;
1880 /*
1881 * BB add code here to normalize offset and length to account for
1882 * negative length which we can not accept over the wire.
1883 */
1884 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001885 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001886 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001887 return rc;
1888 }
1889
1890 if (!lock && !unlock) {
1891 /*
1892 * if no lock or unlock then nothing to do since we do not
1893 * know what it is
1894 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001895 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001896 return -EOPNOTSUPP;
1897 }
1898
1899 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1900 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001901 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 return rc;
1903}
1904
Jeff Layton597b0272012-03-23 14:40:56 -04001905/*
1906 * update the file size (if needed) after a write. Should be called with
1907 * the inode->i_lock held
1908 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001909void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001910cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1911 unsigned int bytes_written)
1912{
1913 loff_t end_of_write = offset + bytes_written;
1914
1915 if (end_of_write > cifsi->server_eof)
1916 cifsi->server_eof = end_of_write;
1917}
1918
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001919static ssize_t
1920cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1921 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922{
1923 int rc = 0;
1924 unsigned int bytes_written = 0;
1925 unsigned int total_written;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001926 struct cifs_tcon *tcon;
1927 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001928 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001929 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001930 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Aurelien Aptel7c065142020-06-04 17:23:55 +02001931 struct cifs_io_parms io_parms = {0};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
Al Viro35c265e2014-08-19 20:25:34 -04001933 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1934 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001936 tcon = tlink_tcon(open_file->tlink);
1937 server = tcon->ses->server;
1938
1939 if (!server->ops->sync_write)
1940 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001941
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001942 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 for (total_written = 0; write_size > total_written;
1945 total_written += bytes_written) {
1946 rc = -EAGAIN;
1947 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001948 struct kvec iov[2];
1949 unsigned int len;
1950
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 /* we could deadlock if we called
1953 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001954 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001956 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 if (rc != 0)
1958 break;
1959 }
Steve French3e844692005-10-03 13:37:24 -07001960
David Howells2b0143b2015-03-17 22:25:59 +00001961 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001962 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001963 /* iov[0] is reserved for smb header */
1964 iov[1].iov_base = (char *)write_data + total_written;
1965 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001966 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001967 io_parms.tcon = tcon;
1968 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001969 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001970 rc = server->ops->sync_write(xid, &open_file->fid,
1971 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 }
1973 if (rc || (bytes_written == 0)) {
1974 if (total_written)
1975 break;
1976 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001977 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 return rc;
1979 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001980 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001981 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001982 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001983 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001984 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001985 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 }
1987
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001988 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
Jeff Layton7da4b492010-10-15 15:34:00 -04001990 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001991 spin_lock(&d_inode(dentry)->i_lock);
Rohith Surabattula78c09632021-04-19 19:02:03 +00001992 if (*offset > d_inode(dentry)->i_size) {
David Howells2b0143b2015-03-17 22:25:59 +00001993 i_size_write(d_inode(dentry), *offset);
Rohith Surabattula78c09632021-04-19 19:02:03 +00001994 d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
1995 }
David Howells2b0143b2015-03-17 22:25:59 +00001996 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 }
David Howells2b0143b2015-03-17 22:25:59 +00001998 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001999 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 return total_written;
2001}
2002
Jeff Layton6508d902010-09-29 19:51:11 -04002003struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2004 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00002005{
2006 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04002007 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
2008
2009 /* only filter by fsuid on multiuser mounts */
2010 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2011 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00002012
Dave Wysochanskicb248812019-10-03 15:16:27 +10002013 spin_lock(&cifs_inode->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00002014 /* we could simply get the first_list_entry since write-only entries
2015 are always at the end of the list but since the first entry might
2016 have a close pending, we go through the whole list */
2017 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08002018 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04002019 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04002020 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Rohith Surabattula860b69a2021-05-05 10:56:47 +00002021 if ((!open_file->invalidHandle)) {
Steve French630f3f0c2007-10-25 21:17:17 +00002022 /* found a good file */
2023 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05002024 cifsFileInfo_get(open_file);
Dave Wysochanskicb248812019-10-03 15:16:27 +10002025 spin_unlock(&cifs_inode->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00002026 return open_file;
2027 } /* else might as well continue, and look for
2028 another, or simply have the caller reopen it
2029 again rather than trying to fix this handle */
2030 } else /* write only file */
2031 break; /* write only files are last so must be done */
2032 }
Dave Wysochanskicb248812019-10-03 15:16:27 +10002033 spin_unlock(&cifs_inode->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00002034 return NULL;
2035}
Steve French630f3f0c2007-10-25 21:17:17 +00002036
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002037/* Return -EBADF if no handle is found and general rc otherwise */
2038int
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002039cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002040 struct cifsFileInfo **ret_file)
Steve French6148a742005-10-05 12:23:19 -07002041{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002042 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04002043 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04002044 bool any_available = false;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002045 int rc = -EBADF;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002046 unsigned int refind = 0;
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002047 bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2048 bool with_delete = flags & FIND_WR_WITH_DELETE;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002049 *ret_file = NULL;
2050
2051 /*
2052 * Having a null inode here (because mapping->host was set to zero by
2053 * the VFS or MM) should not happen but we had reports of on oops (due
2054 * to it being zero) during stress testcases so we need to check for it
2055 */
Steve French60808232006-04-22 15:53:05 +00002056
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002057 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002058 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00002059 dump_stack();
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002060 return rc;
Steve French60808232006-04-22 15:53:05 +00002061 }
2062
Jeff Laytond3892292010-11-02 16:22:50 -04002063 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
2064
Jeff Layton6508d902010-09-29 19:51:11 -04002065 /* only filter by fsuid on multiuser mounts */
2066 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2067 fsuid_only = false;
2068
Dave Wysochanskicb248812019-10-03 15:16:27 +10002069 spin_lock(&cifs_inode->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00002070refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002071 if (refind > MAX_REOPEN_ATT) {
Dave Wysochanskicb248812019-10-03 15:16:27 +10002072 spin_unlock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002073 return rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002074 }
Steve French6148a742005-10-05 12:23:19 -07002075 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04002076 if (!any_available && open_file->pid != current->tgid)
2077 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08002078 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04002079 continue;
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002080 if (with_delete && !(open_file->fid.access & DELETE))
2081 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04002082 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00002083 if (!open_file->invalidHandle) {
2084 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05002085 cifsFileInfo_get(open_file);
Dave Wysochanskicb248812019-10-03 15:16:27 +10002086 spin_unlock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002087 *ret_file = open_file;
2088 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002089 } else {
2090 if (!inv_file)
2091 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00002092 }
Steve French6148a742005-10-05 12:23:19 -07002093 }
2094 }
Jeff Layton2846d382008-09-22 21:33:33 -04002095 /* couldn't find useable FH with same pid, try any available */
2096 if (!any_available) {
2097 any_available = true;
2098 goto refind_writable;
2099 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002100
2101 if (inv_file) {
2102 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05002103 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002104 }
2105
Dave Wysochanskicb248812019-10-03 15:16:27 +10002106 spin_unlock(&cifs_inode->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002107
2108 if (inv_file) {
2109 rc = cifs_reopen_file(inv_file, false);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002110 if (!rc) {
2111 *ret_file = inv_file;
2112 return 0;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002113 }
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002114
Ronnie Sahlberg487317c2019-06-05 10:38:38 +10002115 spin_lock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002116 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
Ronnie Sahlberg487317c2019-06-05 10:38:38 +10002117 spin_unlock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002118 cifsFileInfo_put(inv_file);
2119 ++refind;
2120 inv_file = NULL;
Dave Wysochanskicb248812019-10-03 15:16:27 +10002121 spin_lock(&cifs_inode->open_file_lock);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002122 goto refind_writable;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05002123 }
2124
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002125 return rc;
2126}
2127
2128struct cifsFileInfo *
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002129find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002130{
2131 struct cifsFileInfo *cfile;
2132 int rc;
2133
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002134 rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002135 if (rc)
Joe Perchesa0a30362020-04-14 22:42:53 -07002136 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002137
2138 return cfile;
Steve French6148a742005-10-05 12:23:19 -07002139}
2140
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002141int
2142cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002143 int flags,
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002144 struct cifsFileInfo **ret_file)
2145{
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002146 struct cifsFileInfo *cfile;
Al Virof6a9bc32021-03-05 17:36:04 -05002147 void *page = alloc_dentry_path();
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002148
2149 *ret_file = NULL;
2150
2151 spin_lock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002152 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2153 struct cifsInodeInfo *cinode;
2154 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2155 if (IS_ERR(full_path)) {
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002156 spin_unlock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002157 free_dentry_path(page);
2158 return PTR_ERR(full_path);
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002159 }
Al Virof6a9bc32021-03-05 17:36:04 -05002160 if (strcmp(full_path, name))
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002161 continue;
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002162
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002163 cinode = CIFS_I(d_inode(cfile->dentry));
2164 spin_unlock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002165 free_dentry_path(page);
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002166 return cifs_get_writable_file(cinode, flags, ret_file);
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002167 }
2168
2169 spin_unlock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002170 free_dentry_path(page);
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002171 return -ENOENT;
2172}
2173
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002174int
2175cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2176 struct cifsFileInfo **ret_file)
2177{
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002178 struct cifsFileInfo *cfile;
Al Virof6a9bc32021-03-05 17:36:04 -05002179 void *page = alloc_dentry_path();
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002180
2181 *ret_file = NULL;
2182
2183 spin_lock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002184 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2185 struct cifsInodeInfo *cinode;
2186 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2187 if (IS_ERR(full_path)) {
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002188 spin_unlock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002189 free_dentry_path(page);
2190 return PTR_ERR(full_path);
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002191 }
Al Virof6a9bc32021-03-05 17:36:04 -05002192 if (strcmp(full_path, name))
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002193 continue;
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002194
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002195 cinode = CIFS_I(d_inode(cfile->dentry));
2196 spin_unlock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002197 free_dentry_path(page);
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002198 *ret_file = find_readable_file(cinode, 0);
2199 return *ret_file ? 0 : -ENOENT;
2200 }
2201
2202 spin_unlock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002203 free_dentry_path(page);
Ronnie Sahlberg496902d2019-09-09 15:30:00 +10002204 return -ENOENT;
2205}
2206
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
2208{
2209 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002210 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 char *write_data;
2212 int rc = -EFAULT;
2213 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07002215 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
2217 if (!mapping || !mapping->host)
2218 return -EFAULT;
2219
2220 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221
2222 offset += (loff_t)from;
2223 write_data = kmap(page);
2224 write_data += from;
2225
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002226 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 kunmap(page);
2228 return -EIO;
2229 }
2230
2231 /* racing with truncate? */
2232 if (offset > mapping->host->i_size) {
2233 kunmap(page);
2234 return 0; /* don't care */
2235 }
2236
2237 /* check to make sure that we are not extending the file */
2238 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002239 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002241 rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
2242 &open_file);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002243 if (!rc) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002244 bytes_written = cifs_write(open_file, open_file->pid,
2245 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04002246 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07002248 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002249 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07002250 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002251 else if (bytes_written < 0)
2252 rc = bytes_written;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002253 else
2254 rc = -EFAULT;
Steve French6148a742005-10-05 12:23:19 -07002255 } else {
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002256 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2257 if (!is_retryable_error(rc))
2258 rc = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 }
2260
2261 kunmap(page);
2262 return rc;
2263}
2264
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002265static struct cifs_writedata *
2266wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2267 pgoff_t end, pgoff_t *index,
2268 unsigned int *found_pages)
2269{
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002270 struct cifs_writedata *wdata;
2271
2272 wdata = cifs_writedata_alloc((unsigned int)tofind,
2273 cifs_writev_complete);
2274 if (!wdata)
2275 return NULL;
2276
Jan Kara9c19a9c2017-11-15 17:35:26 -08002277 *found_pages = find_get_pages_range_tag(mapping, index, end,
2278 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002279 return wdata;
2280}
2281
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002282static unsigned int
2283wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2284 struct address_space *mapping,
2285 struct writeback_control *wbc,
2286 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2287{
2288 unsigned int nr_pages = 0, i;
2289 struct page *page;
2290
2291 for (i = 0; i < found_pages; i++) {
2292 page = wdata->pages[i];
2293 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002294 * At this point we hold neither the i_pages lock nor the
2295 * page lock: the page may be truncated or invalidated
2296 * (changing page->mapping to NULL), or even swizzled
2297 * back from swapper_space to tmpfs file mapping
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002298 */
2299
2300 if (nr_pages == 0)
2301 lock_page(page);
2302 else if (!trylock_page(page))
2303 break;
2304
2305 if (unlikely(page->mapping != mapping)) {
2306 unlock_page(page);
2307 break;
2308 }
2309
2310 if (!wbc->range_cyclic && page->index > end) {
2311 *done = true;
2312 unlock_page(page);
2313 break;
2314 }
2315
2316 if (*next && (page->index != *next)) {
2317 /* Not next consecutive page */
2318 unlock_page(page);
2319 break;
2320 }
2321
2322 if (wbc->sync_mode != WB_SYNC_NONE)
2323 wait_on_page_writeback(page);
2324
2325 if (PageWriteback(page) ||
2326 !clear_page_dirty_for_io(page)) {
2327 unlock_page(page);
2328 break;
2329 }
2330
2331 /*
2332 * This actually clears the dirty bit in the radix tree.
2333 * See cifs_writepage() for more commentary.
2334 */
2335 set_page_writeback(page);
2336 if (page_offset(page) >= i_size_read(mapping->host)) {
2337 *done = true;
2338 unlock_page(page);
2339 end_page_writeback(page);
2340 break;
2341 }
2342
2343 wdata->pages[i] = page;
2344 *next = page->index + 1;
2345 ++nr_pages;
2346 }
2347
2348 /* reset index to refind any pages skipped */
2349 if (nr_pages == 0)
2350 *index = wdata->pages[0]->index + 1;
2351
2352 /* put any pages we aren't going to use */
2353 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002354 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002355 wdata->pages[i] = NULL;
2356 }
2357
2358 return nr_pages;
2359}
2360
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002361static int
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002362wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2363 struct address_space *mapping, struct writeback_control *wbc)
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002364{
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002365 int rc;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002366
2367 wdata->sync_mode = wbc->sync_mode;
2368 wdata->nr_pages = nr_pages;
2369 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002370 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002371 wdata->tailsz = min(i_size_read(mapping->host) -
2372 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002373 (loff_t)PAGE_SIZE);
2374 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002375 wdata->pid = wdata->cfile->pid;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002376
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002377 rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002378 if (rc)
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002379 return rc;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08002380
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002381 if (wdata->cfile->invalidHandle)
2382 rc = -EAGAIN;
2383 else
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002384 rc = wdata->server->ops->async_writev(wdata,
2385 cifs_writedata_release);
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002386
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002387 return rc;
2388}
2389
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002391 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392{
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002393 struct inode *inode = mapping->host;
2394 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002395 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002396 bool done = false, scanned = false, range_whole = false;
2397 pgoff_t end, index;
2398 struct cifs_writedata *wdata;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002399 struct cifsFileInfo *cfile = NULL;
Steve French37c0eb42005-10-05 14:50:29 -07002400 int rc = 0;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002401 int saved_rc = 0;
Steve French0cb012d2018-10-11 01:01:02 -05002402 unsigned int xid;
Steve French50c2f752007-07-13 00:33:32 +00002403
Steve French37c0eb42005-10-05 14:50:29 -07002404 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002405 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002406 * one page at a time via cifs_writepage
2407 */
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10002408 if (cifs_sb->ctx->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002409 return generic_writepages(mapping, wbc);
2410
Steve French0cb012d2018-10-11 01:01:02 -05002411 xid = get_xid();
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002412 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002413 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002414 end = -1;
2415 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002416 index = wbc->range_start >> PAGE_SHIFT;
2417 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002418 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002419 range_whole = true;
2420 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002421 }
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002422 server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
2423
Steve French37c0eb42005-10-05 14:50:29 -07002424retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002425 while (!done && index <= end) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002426 unsigned int i, nr_pages, found_pages, wsize;
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002427 pgoff_t next = 0, tofind, saved_index = index;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002428 struct cifs_credits credits_on_stack;
2429 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002430 int get_file_rc = 0;
Steve French37c0eb42005-10-05 14:50:29 -07002431
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002432 if (cfile)
2433 cifsFileInfo_put(cfile);
2434
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002435 rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002436
2437 /* in case of an error store it to return later */
2438 if (rc)
2439 get_file_rc = rc;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002440
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10002441 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002442 &wsize, credits);
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002443 if (rc != 0) {
2444 done = true;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002445 break;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002446 }
Steve French37c0eb42005-10-05 14:50:29 -07002447
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002448 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07002449
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002450 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2451 &found_pages);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002452 if (!wdata) {
2453 rc = -ENOMEM;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002454 done = true;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002455 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002456 break;
2457 }
2458
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002459 if (found_pages == 0) {
2460 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002461 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002462 break;
2463 }
2464
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002465 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2466 end, &index, &next, &done);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002467
2468 /* nothing to write? */
2469 if (nr_pages == 0) {
2470 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002471 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002472 continue;
2473 }
2474
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002475 wdata->credits = credits_on_stack;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002476 wdata->cfile = cfile;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002477 wdata->server = server;
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002478 cfile = NULL;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002479
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002480 if (!wdata->cfile) {
Pavel Shilovskyfe768d52019-01-29 12:15:11 -08002481 cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
2482 get_file_rc);
2483 if (is_retryable_error(get_file_rc))
2484 rc = get_file_rc;
2485 else
2486 rc = -EBADF;
Pavel Shilovskyc4b8f652019-01-28 12:09:02 -08002487 } else
2488 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
Jeff Layton941b8532011-01-11 07:24:01 -05002489
Pavel Shilovsky258f0602019-01-28 11:57:00 -08002490 for (i = 0; i < nr_pages; ++i)
2491 unlock_page(wdata->pages[i]);
2492
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002493 /* send failure -- clean up the mess */
2494 if (rc != 0) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002495 add_credits_and_wake_if(server, &wdata->credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002496 for (i = 0; i < nr_pages; ++i) {
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002497 if (is_retryable_error(rc))
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002498 redirty_page_for_writepage(wbc,
2499 wdata->pages[i]);
2500 else
2501 SetPageError(wdata->pages[i]);
2502 end_page_writeback(wdata->pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002503 put_page(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002504 }
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002505 if (!is_retryable_error(rc))
Jeff Layton941b8532011-01-11 07:24:01 -05002506 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002507 }
2508 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002509
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002510 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2511 index = saved_index;
2512 continue;
2513 }
2514
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002515 /* Return immediately if we received a signal during writing */
2516 if (is_interrupt_error(rc)) {
2517 done = true;
2518 break;
2519 }
2520
2521 if (rc != 0 && saved_rc == 0)
2522 saved_rc = rc;
2523
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002524 wbc->nr_to_write -= nr_pages;
2525 if (wbc->nr_to_write <= 0)
2526 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002527
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002528 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002529 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002530
Steve French37c0eb42005-10-05 14:50:29 -07002531 if (!scanned && !done) {
2532 /*
2533 * We hit the last page and there is more work to be done: wrap
2534 * back to the start of the file
2535 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002536 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002537 index = 0;
2538 goto retry;
2539 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002540
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002541 if (saved_rc != 0)
2542 rc = saved_rc;
2543
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002544 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002545 mapping->writeback_index = index;
2546
Pavel Shilovskyc7d38db2019-01-25 15:23:36 -08002547 if (cfile)
2548 cifsFileInfo_put(cfile);
Steve French0cb012d2018-10-11 01:01:02 -05002549 free_xid(xid);
Rohith Surabattulac3f207a2021-04-13 00:26:42 -05002550 /* Indication to update ctime and mtime as close is deferred */
2551 set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 return rc;
2553}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002555static int
2556cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002558 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002559 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002561 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562/* BB add check for wbc flags */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002563 get_page(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002564 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002565 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002566
2567 /*
2568 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2569 *
2570 * A writepage() implementation always needs to do either this,
2571 * or re-dirty the page with "redirty_page_for_writepage()" in
2572 * the case of a failure.
2573 *
2574 * Just unlocking the page will cause the radix tree tag-bits
2575 * to fail to update with the state of the page correctly.
2576 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002577 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002578retry_write:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002579 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002580 if (is_retryable_error(rc)) {
2581 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
Jeff Layton97b37f22017-05-25 06:59:52 -04002582 goto retry_write;
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002583 redirty_page_for_writepage(wbc, page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002584 } else if (rc != 0) {
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002585 SetPageError(page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002586 mapping_set_error(page->mapping, rc);
2587 } else {
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002588 SetPageUptodate(page);
Jeff Layton97b37f22017-05-25 06:59:52 -04002589 }
Linus Torvaldscb876f42006-12-23 16:19:07 -08002590 end_page_writeback(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002591 put_page(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002592 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 return rc;
2594}
2595
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002596static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2597{
2598 int rc = cifs_writepage_locked(page, wbc);
2599 unlock_page(page);
2600 return rc;
2601}
2602
Nick Piggind9414772008-09-24 11:32:59 -04002603static int cifs_write_end(struct file *file, struct address_space *mapping,
2604 loff_t pos, unsigned len, unsigned copied,
2605 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606{
Nick Piggind9414772008-09-24 11:32:59 -04002607 int rc;
2608 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002609 struct cifsFileInfo *cfile = file->private_data;
2610 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2611 __u32 pid;
2612
2613 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2614 pid = cfile->pid;
2615 else
2616 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
Joe Perchesf96637b2013-05-04 22:12:25 -05002618 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002619 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002620
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002621 if (PageChecked(page)) {
2622 if (copied == len)
2623 SetPageUptodate(page);
2624 ClearPageChecked(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002625 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002626 SetPageUptodate(page);
2627
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002629 char *page_data;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002630 unsigned offset = pos & (PAGE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002631 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002632
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002633 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 /* this is probably better than directly calling
2635 partialpage_write since in this function the file handle is
2636 known which we might as well leverage */
2637 /* BB check if anything else missing out of ppw
2638 such as updating last write time */
2639 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002640 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002641 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002643
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002644 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002645 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002646 rc = copied;
2647 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002648 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649 }
2650
Nick Piggind9414772008-09-24 11:32:59 -04002651 if (rc > 0) {
2652 spin_lock(&inode->i_lock);
Rohith Surabattula78c09632021-04-19 19:02:03 +00002653 if (pos > inode->i_size) {
Nick Piggind9414772008-09-24 11:32:59 -04002654 i_size_write(inode, pos);
Rohith Surabattula78c09632021-04-19 19:02:03 +00002655 inode->i_blocks = (512 - 1 + pos) >> 9;
2656 }
Nick Piggind9414772008-09-24 11:32:59 -04002657 spin_unlock(&inode->i_lock);
2658 }
2659
2660 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002661 put_page(page);
Rohith Surabattulac3f207a2021-04-13 00:26:42 -05002662 /* Indication to update ctime and mtime as close is deferred */
2663 set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
Nick Piggind9414772008-09-24 11:32:59 -04002664
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 return rc;
2666}
2667
Josef Bacik02c24a82011-07-16 20:44:56 -04002668int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2669 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002671 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002673 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002674 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002675 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002676 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002677 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678
Jeff Layton3b49c9a2017-07-07 15:20:52 -04002679 rc = file_write_and_wait_range(file, start, end);
Steve French2391ca42020-02-06 16:04:59 -06002680 if (rc) {
2681 trace_cifs_fsync_err(inode->i_ino, rc);
Josef Bacik02c24a82011-07-16 20:44:56 -04002682 return rc;
Steve French2391ca42020-02-06 16:04:59 -06002683 }
Josef Bacik02c24a82011-07-16 20:44:56 -04002684
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002685 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686
Al Viro35c265e2014-08-19 20:25:34 -04002687 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2688 file, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002689
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002690 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002691 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002692 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002693 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002694 rc = 0; /* don't care about it in fsync */
2695 }
2696 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002697
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002698 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002699 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2700 server = tcon->ses->server;
Steve French71e68642021-11-10 01:47:48 -06002701 if (server->ops->flush == NULL) {
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002702 rc = -ENOSYS;
Steve French71e68642021-11-10 01:47:48 -06002703 goto strict_fsync_exit;
2704 }
2705
2706 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2707 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2708 if (smbfile) {
2709 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2710 cifsFileInfo_put(smbfile);
2711 } else
2712 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2713 } else
2714 rc = server->ops->flush(xid, tcon, &smbfile->fid);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002715 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002716
Steve French71e68642021-11-10 01:47:48 -06002717strict_fsync_exit:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002718 free_xid(xid);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002719 return rc;
2720}
2721
Josef Bacik02c24a82011-07-16 20:44:56 -04002722int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002723{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002724 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002725 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002726 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002727 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002728 struct cifsFileInfo *smbfile = file->private_data;
Steve French71e68642021-11-10 01:47:48 -06002729 struct inode *inode = file_inode(file);
Al Viro7119e222014-10-22 00:25:12 -04002730 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002731
Jeff Layton3b49c9a2017-07-07 15:20:52 -04002732 rc = file_write_and_wait_range(file, start, end);
Steve Frenchf2bf09e2020-02-05 18:22:37 -06002733 if (rc) {
2734 trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
Josef Bacik02c24a82011-07-16 20:44:56 -04002735 return rc;
Steve Frenchf2bf09e2020-02-05 18:22:37 -06002736 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002737
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002738 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002739
Al Viro35c265e2014-08-19 20:25:34 -04002740 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2741 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002742
2743 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002744 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2745 server = tcon->ses->server;
Steve French71e68642021-11-10 01:47:48 -06002746 if (server->ops->flush == NULL) {
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002747 rc = -ENOSYS;
Steve French71e68642021-11-10 01:47:48 -06002748 goto fsync_exit;
2749 }
2750
2751 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2752 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2753 if (smbfile) {
2754 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2755 cifsFileInfo_put(smbfile);
2756 } else
2757 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2758 } else
2759 rc = server->ops->flush(xid, tcon, &smbfile->fid);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002760 }
Steve Frenchb298f222009-02-21 21:17:43 +00002761
Steve French71e68642021-11-10 01:47:48 -06002762fsync_exit:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002763 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 return rc;
2765}
2766
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767/*
2768 * As file closes, flush all cached write data for this inode checking
2769 * for write behind errors.
2770 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002771int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772{
Al Viro496ad9a2013-01-23 17:07:38 -05002773 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 int rc = 0;
2775
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002776 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002777 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002778
Joe Perchesf96637b2013-05-04 22:12:25 -05002779 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Steve Frenchf2bf09e2020-02-05 18:22:37 -06002780 if (rc)
2781 trace_cifs_flush_err(inode->i_ino, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 return rc;
2783}
2784
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002785static int
2786cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2787{
2788 int rc = 0;
2789 unsigned long i;
2790
2791 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002792 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002793 if (!pages[i]) {
2794 /*
2795 * save number of pages we have already allocated and
2796 * return with ENOMEM error
2797 */
2798 num_pages = i;
2799 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002800 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002801 }
2802 }
2803
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002804 if (rc) {
2805 for (i = 0; i < num_pages; i++)
2806 put_page(pages[i]);
2807 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002808 return rc;
2809}
2810
2811static inline
2812size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2813{
2814 size_t num_pages;
2815 size_t clen;
2816
2817 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002818 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002819
2820 if (cur_len)
2821 *cur_len = clen;
2822
2823 return num_pages;
2824}
2825
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002826static void
Steve French4a5c80d2014-02-07 20:45:12 -06002827cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002828{
2829 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002830 struct cifs_writedata *wdata = container_of(refcount,
2831 struct cifs_writedata, refcount);
2832
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002833 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
Steve French4a5c80d2014-02-07 20:45:12 -06002834 for (i = 0; i < wdata->nr_pages; i++)
2835 put_page(wdata->pages[i]);
2836 cifs_writedata_release(refcount);
2837}
2838
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002839static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2840
Steve French4a5c80d2014-02-07 20:45:12 -06002841static void
2842cifs_uncached_writev_complete(struct work_struct *work)
2843{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002844 struct cifs_writedata *wdata = container_of(work,
2845 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002846 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002847 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2848
2849 spin_lock(&inode->i_lock);
2850 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2851 if (cifsi->server_eof > inode->i_size)
2852 i_size_write(inode, cifsi->server_eof);
2853 spin_unlock(&inode->i_lock);
2854
2855 complete(&wdata->done);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002856 collect_uncached_write_data(wdata->ctx);
2857 /* the below call can possibly free the last ref to aio ctx */
Steve French4a5c80d2014-02-07 20:45:12 -06002858 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002859}
2860
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002861static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002862wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2863 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002864{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002865 size_t save_len, copied, bytes, cur_len = *len;
2866 unsigned long i, nr_pages = *num_pages;
2867
2868 save_len = cur_len;
2869 for (i = 0; i < nr_pages; i++) {
2870 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2871 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2872 cur_len -= copied;
2873 /*
2874 * If we didn't copy as much as we expected, then that
2875 * may mean we trod into an unmapped area. Stop copying
2876 * at that point. On the next pass through the big
2877 * loop, we'll likely end up getting a zero-length
2878 * write and bailing out of it.
2879 */
2880 if (copied < bytes)
2881 break;
2882 }
2883 cur_len = save_len - cur_len;
2884 *len = cur_len;
2885
2886 /*
2887 * If we have no data to send, then that probably means that
2888 * the copy above failed altogether. That's most likely because
2889 * the address in the iovec was bogus. Return -EFAULT and let
2890 * the caller free anything we allocated and bail out.
2891 */
2892 if (!cur_len)
2893 return -EFAULT;
2894
2895 /*
2896 * i + 1 now represents the number of pages we actually used in
2897 * the copy phase above.
2898 */
2899 *num_pages = i + 1;
2900 return 0;
2901}
2902
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002903static int
Long Li8c5f9c12018-10-31 22:13:10 +00002904cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
2905 struct cifs_aio_ctx *ctx)
2906{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002907 unsigned int wsize;
2908 struct cifs_credits credits;
Long Li8c5f9c12018-10-31 22:13:10 +00002909 int rc;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002910 struct TCP_Server_Info *server = wdata->server;
Long Li8c5f9c12018-10-31 22:13:10 +00002911
Long Li8c5f9c12018-10-31 22:13:10 +00002912 do {
Long Lid53e2922019-03-15 07:54:59 +00002913 if (wdata->cfile->invalidHandle) {
Long Li8c5f9c12018-10-31 22:13:10 +00002914 rc = cifs_reopen_file(wdata->cfile, false);
Long Lid53e2922019-03-15 07:54:59 +00002915 if (rc == -EAGAIN)
2916 continue;
2917 else if (rc)
2918 break;
2919 }
2920
2921
2922 /*
2923 * Wait for credits to resend this wdata.
2924 * Note: we are attempting to resend the whole wdata not in
2925 * segments
2926 */
2927 do {
2928 rc = server->ops->wait_mtu_credits(server, wdata->bytes,
2929 &wsize, &credits);
2930 if (rc)
2931 goto fail;
2932
2933 if (wsize < wdata->bytes) {
2934 add_credits_and_wake_if(server, &credits, 0);
2935 msleep(1000);
2936 }
2937 } while (wsize < wdata->bytes);
2938 wdata->credits = credits;
2939
2940 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2941
2942 if (!rc) {
2943 if (wdata->cfile->invalidHandle)
2944 rc = -EAGAIN;
Long Lib7a55bb2019-10-15 22:54:50 +00002945 else {
2946#ifdef CONFIG_CIFS_SMB_DIRECT
2947 if (wdata->mr) {
2948 wdata->mr->need_invalidate = true;
2949 smbd_deregister_mr(wdata->mr);
2950 wdata->mr = NULL;
2951 }
2952#endif
Long Lid53e2922019-03-15 07:54:59 +00002953 rc = server->ops->async_writev(wdata,
Long Li8c5f9c12018-10-31 22:13:10 +00002954 cifs_uncached_writedata_release);
Long Lib7a55bb2019-10-15 22:54:50 +00002955 }
Long Lid53e2922019-03-15 07:54:59 +00002956 }
Long Li8c5f9c12018-10-31 22:13:10 +00002957
Long Lid53e2922019-03-15 07:54:59 +00002958 /* If the write was successfully sent, we are done */
2959 if (!rc) {
2960 list_add_tail(&wdata->list, wdata_list);
2961 return 0;
2962 }
Long Li8c5f9c12018-10-31 22:13:10 +00002963
Long Lid53e2922019-03-15 07:54:59 +00002964 /* Roll back credits and retry if needed */
2965 add_credits_and_wake_if(server, &wdata->credits, 0);
2966 } while (rc == -EAGAIN);
2967
2968fail:
Long Li8c5f9c12018-10-31 22:13:10 +00002969 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Long Li8c5f9c12018-10-31 22:13:10 +00002970 return rc;
2971}
2972
2973static int
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002974cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2975 struct cifsFileInfo *open_file,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002976 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
2977 struct cifs_aio_ctx *ctx)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002978{
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002979 int rc = 0;
2980 size_t cur_len;
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002981 unsigned long nr_pages, num_pages, i;
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002982 struct cifs_writedata *wdata;
Al Virofc56b982016-09-21 18:18:23 -04002983 struct iov_iter saved_from = *from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002984 loff_t saved_offset = offset;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002985 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002986 struct TCP_Server_Info *server;
Long Li8c5f9c12018-10-31 22:13:10 +00002987 struct page **pagevec;
2988 size_t start;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002989 unsigned int xid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002990
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002991 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2992 pid = open_file->pid;
2993 else
2994 pid = current->tgid;
2995
Aurelien Aptel352d96f2020-05-31 12:38:22 -05002996 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08002997 xid = get_xid();
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002998
2999 do {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003000 unsigned int wsize;
3001 struct cifs_credits credits_on_stack;
3002 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04003003
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003004 if (open_file->invalidHandle) {
3005 rc = cifs_reopen_file(open_file, false);
3006 if (rc == -EAGAIN)
3007 continue;
3008 else if (rc)
3009 break;
3010 }
3011
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10003012 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003013 &wsize, credits);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04003014 if (rc)
3015 break;
3016
Long Lib6bc8a72018-12-16 23:17:04 +00003017 cur_len = min_t(const size_t, len, wsize);
3018
Long Li8c5f9c12018-10-31 22:13:10 +00003019 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05003020 ssize_t result;
3021
3022 result = iov_iter_get_pages_alloc(
Long Lib6bc8a72018-12-16 23:17:04 +00003023 from, &pagevec, cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05003024 if (result < 0) {
Long Li8c5f9c12018-10-31 22:13:10 +00003025 cifs_dbg(VFS,
Joe Perchesa0a30362020-04-14 22:42:53 -07003026 "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
3027 result, iov_iter_type(from),
3028 from->iov_offset, from->count);
Long Li8c5f9c12018-10-31 22:13:10 +00003029 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00003030
3031 rc = result;
3032 add_credits_and_wake_if(server, credits, 0);
Long Li8c5f9c12018-10-31 22:13:10 +00003033 break;
3034 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05003035 cur_len = (size_t)result;
Long Li8c5f9c12018-10-31 22:13:10 +00003036 iov_iter_advance(from, cur_len);
3037
3038 nr_pages =
3039 (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
3040
3041 wdata = cifs_writedata_direct_alloc(pagevec,
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003042 cifs_uncached_writev_complete);
Long Li8c5f9c12018-10-31 22:13:10 +00003043 if (!wdata) {
3044 rc = -ENOMEM;
3045 add_credits_and_wake_if(server, credits, 0);
3046 break;
3047 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003048
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003049
Long Li8c5f9c12018-10-31 22:13:10 +00003050 wdata->page_offset = start;
3051 wdata->tailsz =
3052 nr_pages > 1 ?
3053 cur_len - (PAGE_SIZE - start) -
3054 (nr_pages - 2) * PAGE_SIZE :
3055 cur_len;
3056 } else {
3057 nr_pages = get_numpages(wsize, len, &cur_len);
3058 wdata = cifs_writedata_alloc(nr_pages,
3059 cifs_uncached_writev_complete);
3060 if (!wdata) {
3061 rc = -ENOMEM;
3062 add_credits_and_wake_if(server, credits, 0);
3063 break;
3064 }
Jeff Layton5d81de82014-02-14 07:20:35 -05003065
Long Li8c5f9c12018-10-31 22:13:10 +00003066 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
3067 if (rc) {
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08003068 kvfree(wdata->pages);
Long Li8c5f9c12018-10-31 22:13:10 +00003069 kfree(wdata);
3070 add_credits_and_wake_if(server, credits, 0);
3071 break;
3072 }
3073
3074 num_pages = nr_pages;
3075 rc = wdata_fill_from_iovec(
3076 wdata, from, &cur_len, &num_pages);
3077 if (rc) {
3078 for (i = 0; i < nr_pages; i++)
3079 put_page(wdata->pages[i]);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08003080 kvfree(wdata->pages);
Long Li8c5f9c12018-10-31 22:13:10 +00003081 kfree(wdata);
3082 add_credits_and_wake_if(server, credits, 0);
3083 break;
3084 }
3085
3086 /*
3087 * Bring nr_pages down to the number of pages we
3088 * actually used, and free any pages that we didn't use.
3089 */
3090 for ( ; nr_pages > num_pages; nr_pages--)
3091 put_page(wdata->pages[nr_pages - 1]);
3092
3093 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
3094 }
Jeff Layton5d81de82014-02-14 07:20:35 -05003095
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003096 wdata->sync_mode = WB_SYNC_ALL;
3097 wdata->nr_pages = nr_pages;
3098 wdata->offset = (__u64)offset;
3099 wdata->cfile = cifsFileInfo_get(open_file);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003100 wdata->server = server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003101 wdata->pid = pid;
3102 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07003103 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003104 wdata->credits = credits_on_stack;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003105 wdata->ctx = ctx;
3106 kref_get(&ctx->refcount);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04003107
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003108 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
3109
3110 if (!rc) {
3111 if (wdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003112 rc = -EAGAIN;
3113 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003114 rc = server->ops->async_writev(wdata,
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04003115 cifs_uncached_writedata_release);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003116 }
3117
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003118 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003119 add_credits_and_wake_if(server, &wdata->credits, 0);
Steve French4a5c80d2014-02-07 20:45:12 -06003120 kref_put(&wdata->refcount,
3121 cifs_uncached_writedata_release);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04003122 if (rc == -EAGAIN) {
Al Virofc56b982016-09-21 18:18:23 -04003123 *from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04003124 iov_iter_advance(from, offset - saved_offset);
3125 continue;
3126 }
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003127 break;
3128 }
3129
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04003130 list_add_tail(&wdata->list, wdata_list);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003131 offset += cur_len;
3132 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003133 } while (len > 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003134
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003135 free_xid(xid);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003136 return rc;
3137}
3138
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003139static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
3140{
3141 struct cifs_writedata *wdata, *tmp;
3142 struct cifs_tcon *tcon;
3143 struct cifs_sb_info *cifs_sb;
3144 struct dentry *dentry = ctx->cfile->dentry;
Dan Carpentere946d3c2021-09-21 23:33:35 +03003145 ssize_t rc;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003146
3147 tcon = tlink_tcon(ctx->cfile->tlink);
3148 cifs_sb = CIFS_SB(dentry->d_sb);
3149
3150 mutex_lock(&ctx->aio_mutex);
3151
3152 if (list_empty(&ctx->list)) {
3153 mutex_unlock(&ctx->aio_mutex);
3154 return;
3155 }
3156
3157 rc = ctx->rc;
3158 /*
3159 * Wait for and collect replies for any successful sends in order of
3160 * increasing offset. Once an error is hit, then return without waiting
3161 * for any more replies.
3162 */
3163restart_loop:
3164 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
3165 if (!rc) {
3166 if (!try_wait_for_completion(&wdata->done)) {
3167 mutex_unlock(&ctx->aio_mutex);
3168 return;
3169 }
3170
3171 if (wdata->result)
3172 rc = wdata->result;
3173 else
3174 ctx->total_len += wdata->bytes;
3175
3176 /* resend call if it's a retryable error */
3177 if (rc == -EAGAIN) {
3178 struct list_head tmp_list;
3179 struct iov_iter tmp_from = ctx->iter;
3180
3181 INIT_LIST_HEAD(&tmp_list);
3182 list_del_init(&wdata->list);
3183
Long Li8c5f9c12018-10-31 22:13:10 +00003184 if (ctx->direct_io)
3185 rc = cifs_resend_wdata(
3186 wdata, &tmp_list, ctx);
3187 else {
3188 iov_iter_advance(&tmp_from,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003189 wdata->offset - ctx->pos);
3190
Long Li8c5f9c12018-10-31 22:13:10 +00003191 rc = cifs_write_from_iter(wdata->offset,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003192 wdata->bytes, &tmp_from,
3193 ctx->cfile, cifs_sb, &tmp_list,
3194 ctx);
Long Lid53e2922019-03-15 07:54:59 +00003195
3196 kref_put(&wdata->refcount,
3197 cifs_uncached_writedata_release);
Long Li8c5f9c12018-10-31 22:13:10 +00003198 }
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003199
3200 list_splice(&tmp_list, &ctx->list);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003201 goto restart_loop;
3202 }
3203 }
3204 list_del_init(&wdata->list);
3205 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3206 }
3207
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003208 cifs_stats_bytes_written(tcon, ctx->total_len);
3209 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
3210
3211 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3212
3213 mutex_unlock(&ctx->aio_mutex);
3214
3215 if (ctx->iocb && ctx->iocb->ki_complete)
Jens Axboe6b19b762021-10-21 09:22:35 -06003216 ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003217 else
3218 complete(&ctx->done);
3219}
3220
Long Li8c5f9c12018-10-31 22:13:10 +00003221static ssize_t __cifs_writev(
3222 struct kiocb *iocb, struct iov_iter *from, bool direct)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003223{
Al Viroe9d15932015-04-06 22:44:11 -04003224 struct file *file = iocb->ki_filp;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003225 ssize_t total_written = 0;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003226 struct cifsFileInfo *cfile;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003227 struct cifs_tcon *tcon;
3228 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003229 struct cifs_aio_ctx *ctx;
Al Virofc56b982016-09-21 18:18:23 -04003230 struct iov_iter saved_from = *from;
Long Li8c5f9c12018-10-31 22:13:10 +00003231 size_t len = iov_iter_count(from);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003232 int rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003233
Al Viroe9d15932015-04-06 22:44:11 -04003234 /*
Long Li8c5f9c12018-10-31 22:13:10 +00003235 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
3236 * In this case, fall back to non-direct write function.
3237 * this could be improved by getting pages directly in ITER_KVEC
Al Viroe9d15932015-04-06 22:44:11 -04003238 */
David Howells66294002019-11-21 08:13:58 +00003239 if (direct && iov_iter_is_kvec(from)) {
Long Li8c5f9c12018-10-31 22:13:10 +00003240 cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
3241 direct = false;
3242 }
Al Viroe9d15932015-04-06 22:44:11 -04003243
Al Viro3309dd02015-04-09 12:55:47 -04003244 rc = generic_write_checks(iocb, from);
3245 if (rc <= 0)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003246 return rc;
3247
Al Viro7119e222014-10-22 00:25:12 -04003248 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003249 cfile = file->private_data;
3250 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003251
3252 if (!tcon->ses->server->ops->async_writev)
3253 return -ENOSYS;
3254
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003255 ctx = cifs_aio_ctx_alloc();
3256 if (!ctx)
3257 return -ENOMEM;
3258
3259 ctx->cfile = cifsFileInfo_get(cfile);
3260
3261 if (!is_sync_kiocb(iocb))
3262 ctx->iocb = iocb;
3263
3264 ctx->pos = iocb->ki_pos;
3265
Long Li8c5f9c12018-10-31 22:13:10 +00003266 if (direct) {
3267 ctx->direct_io = true;
3268 ctx->iter = *from;
3269 ctx->len = len;
3270 } else {
3271 rc = setup_aio_ctx_iter(ctx, from, WRITE);
3272 if (rc) {
3273 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3274 return rc;
3275 }
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003276 }
3277
3278 /* grab a lock here due to read response handlers can access ctx */
3279 mutex_lock(&ctx->aio_mutex);
3280
3281 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
3282 cfile, cifs_sb, &ctx->list, ctx);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003283
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003284 /*
3285 * If at least one write was successfully sent, then discard any rc
3286 * value from the later writes. If the other write succeeds, then
3287 * we'll end up returning whatever was written. If it fails, then
3288 * we'll get a new rc value from that.
3289 */
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003290 if (!list_empty(&ctx->list))
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003291 rc = 0;
3292
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003293 mutex_unlock(&ctx->aio_mutex);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04003294
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003295 if (rc) {
3296 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3297 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003298 }
3299
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07003300 if (!is_sync_kiocb(iocb)) {
3301 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3302 return -EIOCBQUEUED;
3303 }
3304
3305 rc = wait_for_completion_killable(&ctx->done);
3306 if (rc) {
3307 mutex_lock(&ctx->aio_mutex);
3308 ctx->rc = rc = -EINTR;
3309 total_written = ctx->total_len;
3310 mutex_unlock(&ctx->aio_mutex);
3311 } else {
3312 rc = ctx->rc;
3313 total_written = ctx->total_len;
3314 }
3315
3316 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3317
Al Viroe9d15932015-04-06 22:44:11 -04003318 if (unlikely(!total_written))
3319 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003320
Al Viroe9d15932015-04-06 22:44:11 -04003321 iocb->ki_pos += total_written;
Al Viroe9d15932015-04-06 22:44:11 -04003322 return total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003323}
3324
Long Li8c5f9c12018-10-31 22:13:10 +00003325ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3326{
3327 return __cifs_writev(iocb, from, true);
3328}
3329
3330ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3331{
3332 return __cifs_writev(iocb, from, false);
3333}
3334
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003335static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04003336cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003337{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003338 struct file *file = iocb->ki_filp;
3339 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
3340 struct inode *inode = file->f_mapping->host;
3341 struct cifsInodeInfo *cinode = CIFS_I(inode);
3342 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04003343 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003344
Rabin Vincent966681c2017-06-29 16:01:42 +02003345 inode_lock(inode);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003346 /*
3347 * We need to hold the sem to be sure nobody modifies lock list
3348 * with a brlock that prevents writing.
3349 */
3350 down_read(&cinode->lock_sem);
Al Viro5f380c72015-04-07 11:28:12 -04003351
Al Viro3309dd02015-04-09 12:55:47 -04003352 rc = generic_write_checks(iocb, from);
3353 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04003354 goto out;
3355
Al Viro5f380c72015-04-07 11:28:12 -04003356 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003357 server->vals->exclusive_lock_type, 0,
3358 NULL, CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04003359 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04003360 else
3361 rc = -EACCES;
3362out:
Rabin Vincent966681c2017-06-29 16:01:42 +02003363 up_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05003364 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04003365
Christoph Hellwige2592212016-04-07 08:52:01 -07003366 if (rc > 0)
3367 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003368 return rc;
3369}
3370
3371ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04003372cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003373{
Al Viro496ad9a2013-01-23 17:07:38 -05003374 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003375 struct cifsInodeInfo *cinode = CIFS_I(inode);
3376 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3377 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3378 iocb->ki_filp->private_data;
3379 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003380 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003381
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003382 written = cifs_get_writer(cinode);
3383 if (written)
3384 return written;
3385
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003386 if (CIFS_CACHE_WRITE(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003387 if (cap_unix(tcon->ses) &&
3388 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003389 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
Al Viro3dae8752014-04-03 12:05:17 -04003390 written = generic_file_write_iter(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003391 goto out;
3392 }
Al Viro3dae8752014-04-03 12:05:17 -04003393 written = cifs_writev(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003394 goto out;
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04003395 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003396 /*
3397 * For non-oplocked files in strict cache mode we need to write the data
3398 * to the server exactly from the pos to pos+len-1 rather than flush all
3399 * affected pages because it may cause a error with mandatory locks on
3400 * these pages but not on the region from pos to ppos+len-1.
3401 */
Al Viro3dae8752014-04-03 12:05:17 -04003402 written = cifs_user_writev(iocb, from);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003403 if (CIFS_CACHE_READ(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003404 /*
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003405 * We have read level caching and we have just sent a write
3406 * request to the server thus making data in the cache stale.
3407 * Zap the cache and set oplock/lease level to NONE to avoid
3408 * reading stale data from the cache. All subsequent read
3409 * operations will read new data from the server.
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003410 */
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003411 cifs_zap_mapping(inode);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003412 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05003413 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003414 cinode->oplock = 0;
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003415 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003416out:
3417 cifs_put_writer(cinode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003418 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003419}
3420
Jeff Layton0471ca32012-05-16 07:13:16 -04003421static struct cifs_readdata *
Long Lif9f5aca2018-05-30 12:47:54 -07003422cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04003423{
3424 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003425
Long Lif9f5aca2018-05-30 12:47:54 -07003426 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04003427 if (rdata != NULL) {
Long Lif9f5aca2018-05-30 12:47:54 -07003428 rdata->pages = pages;
Jeff Layton6993f742012-05-16 07:13:17 -04003429 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003430 INIT_LIST_HEAD(&rdata->list);
3431 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04003432 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04003433 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003434
Jeff Layton0471ca32012-05-16 07:13:16 -04003435 return rdata;
3436}
3437
Long Lif9f5aca2018-05-30 12:47:54 -07003438static struct cifs_readdata *
3439cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3440{
3441 struct page **pages =
Kees Cook6396bb22018-06-12 14:03:40 -07003442 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
Long Lif9f5aca2018-05-30 12:47:54 -07003443 struct cifs_readdata *ret = NULL;
3444
3445 if (pages) {
3446 ret = cifs_readdata_direct_alloc(pages, complete);
3447 if (!ret)
3448 kfree(pages);
3449 }
3450
3451 return ret;
3452}
3453
Jeff Layton6993f742012-05-16 07:13:17 -04003454void
3455cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04003456{
Jeff Layton6993f742012-05-16 07:13:17 -04003457 struct cifs_readdata *rdata = container_of(refcount,
3458 struct cifs_readdata, refcount);
Long Libd3dcc62017-11-22 17:38:47 -07003459#ifdef CONFIG_CIFS_SMB_DIRECT
3460 if (rdata->mr) {
3461 smbd_deregister_mr(rdata->mr);
3462 rdata->mr = NULL;
3463 }
3464#endif
Jeff Layton6993f742012-05-16 07:13:17 -04003465 if (rdata->cfile)
3466 cifsFileInfo_put(rdata->cfile);
3467
Long Lif9f5aca2018-05-30 12:47:54 -07003468 kvfree(rdata->pages);
Jeff Layton0471ca32012-05-16 07:13:16 -04003469 kfree(rdata);
3470}
3471
Jeff Layton2a1bb132012-05-16 07:13:17 -04003472static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003473cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04003474{
3475 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003476 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04003477 unsigned int i;
3478
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003479 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04003480 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3481 if (!page) {
3482 rc = -ENOMEM;
3483 break;
3484 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003485 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04003486 }
3487
3488 if (rc) {
Roberto Bergantinos Corpas31fad7d2019-05-28 09:38:14 +02003489 unsigned int nr_page_failed = i;
3490
3491 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003492 put_page(rdata->pages[i]);
3493 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04003494 }
3495 }
3496 return rc;
3497}
3498
3499static void
3500cifs_uncached_readdata_release(struct kref *refcount)
3501{
Jeff Layton1c892542012-05-16 07:13:17 -04003502 struct cifs_readdata *rdata = container_of(refcount,
3503 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003504 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003505
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003506 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003507 for (i = 0; i < rdata->nr_pages; i++) {
3508 put_page(rdata->pages[i]);
Jeff Layton1c892542012-05-16 07:13:17 -04003509 }
3510 cifs_readdata_release(refcount);
3511}
3512
Jeff Layton1c892542012-05-16 07:13:17 -04003513/**
3514 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3515 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05003516 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04003517 *
3518 * This function copies data from a list of pages in a readdata response into
3519 * an array of iovecs. It will first calculate where the data should go
3520 * based on the info in the readdata and then copy the data into that spot.
3521 */
Al Viro7f25bba2014-02-04 14:07:43 -05003522static int
3523cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04003524{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04003525 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003526 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003527
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003528 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003529 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02003530 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003531 size_t written;
3532
David Howells00e23702018-10-22 13:07:28 +01003533 if (unlikely(iov_iter_is_pipe(iter))) {
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003534 void *addr = kmap_atomic(page);
3535
3536 written = copy_to_iter(addr, copy, iter);
3537 kunmap_atomic(addr);
3538 } else
3539 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05003540 remaining -= written;
3541 if (written < copy && iov_iter_count(iter) > 0)
3542 break;
Jeff Layton1c892542012-05-16 07:13:17 -04003543 }
Al Viro7f25bba2014-02-04 14:07:43 -05003544 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04003545}
3546
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003547static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3548
Jeff Layton1c892542012-05-16 07:13:17 -04003549static void
3550cifs_uncached_readv_complete(struct work_struct *work)
3551{
3552 struct cifs_readdata *rdata = container_of(work,
3553 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04003554
3555 complete(&rdata->done);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003556 collect_uncached_read_data(rdata->ctx);
3557 /* the below call can possibly free the last ref to aio ctx */
Jeff Layton1c892542012-05-16 07:13:17 -04003558 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3559}
3560
3561static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003562uncached_fill_pages(struct TCP_Server_Info *server,
3563 struct cifs_readdata *rdata, struct iov_iter *iter,
3564 unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04003565{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003566 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003567 unsigned int i;
3568 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003569 unsigned int page_offset = rdata->page_offset;
Jeff Layton1c892542012-05-16 07:13:17 -04003570
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003571 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07003572 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003573 for (i = 0; i < nr_pages; i++) {
3574 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05003575 size_t n;
Long Li1dbe3462018-05-30 12:47:55 -07003576 unsigned int segment_size = rdata->pagesz;
3577
3578 if (i == 0)
3579 segment_size -= page_offset;
3580 else
3581 page_offset = 0;
3582
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003583
Al Viro71335662016-01-09 19:54:50 -05003584 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04003585 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003586 rdata->pages[i] = NULL;
3587 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04003588 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07003589 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003590 }
Long Li1dbe3462018-05-30 12:47:55 -07003591
Al Viro71335662016-01-09 19:54:50 -05003592 n = len;
Long Li1dbe3462018-05-30 12:47:55 -07003593 if (len >= segment_size)
Al Viro71335662016-01-09 19:54:50 -05003594 /* enough data to fill the page */
Long Li1dbe3462018-05-30 12:47:55 -07003595 n = segment_size;
3596 else
Al Viro71335662016-01-09 19:54:50 -05003597 rdata->tailsz = len;
Long Li1dbe3462018-05-30 12:47:55 -07003598 len -= n;
3599
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003600 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003601 result = copy_page_from_iter(
3602 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003603#ifdef CONFIG_CIFS_SMB_DIRECT
3604 else if (rdata->mr)
3605 result = n;
3606#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003607 else
Long Li1dbe3462018-05-30 12:47:55 -07003608 result = cifs_read_page_from_socket(
3609 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003610 if (result < 0)
3611 break;
3612
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003613 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003614 }
3615
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003616 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3617 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003618}
3619
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003620static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003621cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3622 struct cifs_readdata *rdata, unsigned int len)
3623{
3624 return uncached_fill_pages(server, rdata, NULL, len);
3625}
3626
3627static int
3628cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3629 struct cifs_readdata *rdata,
3630 struct iov_iter *iter)
3631{
3632 return uncached_fill_pages(server, rdata, iter, iter->count);
3633}
3634
Long Li6e6e2b82018-10-31 22:13:09 +00003635static int cifs_resend_rdata(struct cifs_readdata *rdata,
3636 struct list_head *rdata_list,
3637 struct cifs_aio_ctx *ctx)
3638{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003639 unsigned int rsize;
3640 struct cifs_credits credits;
Long Li6e6e2b82018-10-31 22:13:09 +00003641 int rc;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003642 struct TCP_Server_Info *server;
3643
3644 /* XXX: should we pick a new channel here? */
3645 server = rdata->server;
Long Li6e6e2b82018-10-31 22:13:09 +00003646
Long Li6e6e2b82018-10-31 22:13:09 +00003647 do {
Long Li0b0dfd52019-03-15 07:55:00 +00003648 if (rdata->cfile->invalidHandle) {
3649 rc = cifs_reopen_file(rdata->cfile, true);
3650 if (rc == -EAGAIN)
3651 continue;
3652 else if (rc)
3653 break;
3654 }
3655
3656 /*
3657 * Wait for credits to resend this rdata.
3658 * Note: we are attempting to resend the whole rdata not in
3659 * segments
3660 */
3661 do {
3662 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
Long Li6e6e2b82018-10-31 22:13:09 +00003663 &rsize, &credits);
3664
Long Li0b0dfd52019-03-15 07:55:00 +00003665 if (rc)
3666 goto fail;
Long Li6e6e2b82018-10-31 22:13:09 +00003667
Long Li0b0dfd52019-03-15 07:55:00 +00003668 if (rsize < rdata->bytes) {
3669 add_credits_and_wake_if(server, &credits, 0);
3670 msleep(1000);
3671 }
3672 } while (rsize < rdata->bytes);
3673 rdata->credits = credits;
3674
3675 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3676 if (!rc) {
3677 if (rdata->cfile->invalidHandle)
3678 rc = -EAGAIN;
Long Lib7a55bb2019-10-15 22:54:50 +00003679 else {
3680#ifdef CONFIG_CIFS_SMB_DIRECT
3681 if (rdata->mr) {
3682 rdata->mr->need_invalidate = true;
3683 smbd_deregister_mr(rdata->mr);
3684 rdata->mr = NULL;
3685 }
3686#endif
Long Li0b0dfd52019-03-15 07:55:00 +00003687 rc = server->ops->async_readv(rdata);
Long Lib7a55bb2019-10-15 22:54:50 +00003688 }
Long Li6e6e2b82018-10-31 22:13:09 +00003689 }
Long Li6e6e2b82018-10-31 22:13:09 +00003690
Long Li0b0dfd52019-03-15 07:55:00 +00003691 /* If the read was successfully sent, we are done */
3692 if (!rc) {
3693 /* Add to aio pending list */
3694 list_add_tail(&rdata->list, rdata_list);
3695 return 0;
3696 }
Long Li6e6e2b82018-10-31 22:13:09 +00003697
Long Li0b0dfd52019-03-15 07:55:00 +00003698 /* Roll back credits and retry if needed */
3699 add_credits_and_wake_if(server, &rdata->credits, 0);
3700 } while (rc == -EAGAIN);
Long Li6e6e2b82018-10-31 22:13:09 +00003701
Long Li0b0dfd52019-03-15 07:55:00 +00003702fail:
3703 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Long Li6e6e2b82018-10-31 22:13:09 +00003704 return rc;
3705}
3706
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003707static int
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003708cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003709 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3710 struct cifs_aio_ctx *ctx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003712 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003713 unsigned int npages, rsize;
3714 struct cifs_credits credits_on_stack;
3715 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003716 size_t cur_len;
3717 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003718 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003719 struct TCP_Server_Info *server;
Long Li6e6e2b82018-10-31 22:13:09 +00003720 struct page **pagevec;
3721 size_t start;
3722 struct iov_iter direct_iov = ctx->iter;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003723
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003724 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07003725
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003726 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3727 pid = open_file->pid;
3728 else
3729 pid = current->tgid;
3730
Long Li6e6e2b82018-10-31 22:13:09 +00003731 if (ctx->direct_io)
3732 iov_iter_advance(&direct_iov, offset - ctx->pos);
3733
Jeff Layton1c892542012-05-16 07:13:17 -04003734 do {
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003735 if (open_file->invalidHandle) {
3736 rc = cifs_reopen_file(open_file, true);
3737 if (rc == -EAGAIN)
3738 continue;
3739 else if (rc)
3740 break;
3741 }
3742
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10003743 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003744 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003745 if (rc)
3746 break;
3747
3748 cur_len = min_t(const size_t, len, rsize);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003749
Long Li6e6e2b82018-10-31 22:13:09 +00003750 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05003751 ssize_t result;
Long Li6e6e2b82018-10-31 22:13:09 +00003752
Steve Frenchb98e26d2018-11-01 10:54:32 -05003753 result = iov_iter_get_pages_alloc(
Long Li6e6e2b82018-10-31 22:13:09 +00003754 &direct_iov, &pagevec,
3755 cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05003756 if (result < 0) {
Long Li6e6e2b82018-10-31 22:13:09 +00003757 cifs_dbg(VFS,
Joe Perchesa0a30362020-04-14 22:42:53 -07003758 "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
3759 result, iov_iter_type(&direct_iov),
3760 direct_iov.iov_offset,
3761 direct_iov.count);
Long Li6e6e2b82018-10-31 22:13:09 +00003762 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00003763
3764 rc = result;
3765 add_credits_and_wake_if(server, credits, 0);
Long Li6e6e2b82018-10-31 22:13:09 +00003766 break;
3767 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05003768 cur_len = (size_t)result;
Long Li6e6e2b82018-10-31 22:13:09 +00003769 iov_iter_advance(&direct_iov, cur_len);
3770
3771 rdata = cifs_readdata_direct_alloc(
3772 pagevec, cifs_uncached_readv_complete);
3773 if (!rdata) {
3774 add_credits_and_wake_if(server, credits, 0);
3775 rc = -ENOMEM;
3776 break;
3777 }
3778
3779 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
3780 rdata->page_offset = start;
3781 rdata->tailsz = npages > 1 ?
3782 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
3783 cur_len;
3784
3785 } else {
3786
3787 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3788 /* allocate a readdata struct */
3789 rdata = cifs_readdata_alloc(npages,
Jeff Layton1c892542012-05-16 07:13:17 -04003790 cifs_uncached_readv_complete);
Long Li6e6e2b82018-10-31 22:13:09 +00003791 if (!rdata) {
3792 add_credits_and_wake_if(server, credits, 0);
3793 rc = -ENOMEM;
3794 break;
3795 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003796
Long Li6e6e2b82018-10-31 22:13:09 +00003797 rc = cifs_read_allocate_pages(rdata, npages);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08003798 if (rc) {
3799 kvfree(rdata->pages);
3800 kfree(rdata);
3801 add_credits_and_wake_if(server, credits, 0);
3802 break;
3803 }
Long Li6e6e2b82018-10-31 22:13:09 +00003804
3805 rdata->tailsz = PAGE_SIZE;
3806 }
Jeff Layton1c892542012-05-16 07:13:17 -04003807
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003808 rdata->server = server;
Jeff Layton1c892542012-05-16 07:13:17 -04003809 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003810 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003811 rdata->offset = offset;
3812 rdata->bytes = cur_len;
3813 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003814 rdata->pagesz = PAGE_SIZE;
3815 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003816 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003817 rdata->credits = credits_on_stack;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003818 rdata->ctx = ctx;
3819 kref_get(&ctx->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003820
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003821 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3822
3823 if (!rc) {
3824 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003825 rc = -EAGAIN;
3826 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003827 rc = server->ops->async_readv(rdata);
3828 }
3829
Jeff Layton1c892542012-05-16 07:13:17 -04003830 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003831 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003832 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003833 cifs_uncached_readdata_release);
3834 if (rc == -EAGAIN) {
3835 iov_iter_revert(&direct_iov, cur_len);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003836 continue;
Long Li6e6e2b82018-10-31 22:13:09 +00003837 }
Jeff Layton1c892542012-05-16 07:13:17 -04003838 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 }
Jeff Layton1c892542012-05-16 07:13:17 -04003840
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003841 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003842 offset += cur_len;
3843 len -= cur_len;
3844 } while (len > 0);
3845
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003846 return rc;
3847}
3848
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003849static void
3850collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003851{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003852 struct cifs_readdata *rdata, *tmp;
3853 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003854 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003855 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003856
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003857 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003858
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003859 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003860
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003861 if (list_empty(&ctx->list)) {
3862 mutex_unlock(&ctx->aio_mutex);
3863 return;
3864 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003865
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003866 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003867 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003868again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003869 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003870 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003871 if (!try_wait_for_completion(&rdata->done)) {
3872 mutex_unlock(&ctx->aio_mutex);
3873 return;
3874 }
3875
3876 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003877 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003878 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003879 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003880
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003881 list_del_init(&rdata->list);
3882 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003883
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003884 /*
3885 * Got a part of data and then reconnect has
3886 * happened -- fill the buffer and continue
3887 * reading.
3888 */
3889 if (got_bytes && got_bytes < rdata->bytes) {
Long Li6e6e2b82018-10-31 22:13:09 +00003890 rc = 0;
3891 if (!ctx->direct_io)
3892 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003893 if (rc) {
3894 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003895 cifs_uncached_readdata_release);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003896 continue;
3897 }
3898 }
3899
Long Li6e6e2b82018-10-31 22:13:09 +00003900 if (ctx->direct_io) {
3901 /*
3902 * Re-use rdata as this is a
3903 * direct I/O
3904 */
3905 rc = cifs_resend_rdata(
3906 rdata,
3907 &tmp_list, ctx);
3908 } else {
3909 rc = cifs_send_async_read(
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003910 rdata->offset + got_bytes,
3911 rdata->bytes - got_bytes,
3912 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003913 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003914
Long Li6e6e2b82018-10-31 22:13:09 +00003915 kref_put(&rdata->refcount,
3916 cifs_uncached_readdata_release);
3917 }
3918
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003919 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003920
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003921 goto again;
3922 } else if (rdata->result)
3923 rc = rdata->result;
Long Li6e6e2b82018-10-31 22:13:09 +00003924 else if (!ctx->direct_io)
Jeff Layton1c892542012-05-16 07:13:17 -04003925 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003926
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003927 /* if there was a short read -- discard anything left */
3928 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3929 rc = -ENODATA;
Long Li6e6e2b82018-10-31 22:13:09 +00003930
3931 ctx->total_len += rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003932 }
3933 list_del_init(&rdata->list);
3934 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003936
Jérôme Glisse13f59382019-04-10 15:37:47 -04003937 if (!ctx->direct_io)
Long Li6e6e2b82018-10-31 22:13:09 +00003938 ctx->total_len = ctx->len - iov_iter_count(to);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003939
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003940 /* mask nodata case */
3941 if (rc == -ENODATA)
3942 rc = 0;
3943
Yilu Lin97adda82020-03-18 11:59:19 +08003944 ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003945
3946 mutex_unlock(&ctx->aio_mutex);
3947
3948 if (ctx->iocb && ctx->iocb->ki_complete)
Jens Axboe6b19b762021-10-21 09:22:35 -06003949 ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003950 else
3951 complete(&ctx->done);
3952}
3953
Long Li6e6e2b82018-10-31 22:13:09 +00003954static ssize_t __cifs_readv(
3955 struct kiocb *iocb, struct iov_iter *to, bool direct)
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003956{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003957 size_t len;
Long Li6e6e2b82018-10-31 22:13:09 +00003958 struct file *file = iocb->ki_filp;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003959 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003960 struct cifsFileInfo *cfile;
Long Li6e6e2b82018-10-31 22:13:09 +00003961 struct cifs_tcon *tcon;
3962 ssize_t rc, total_read = 0;
3963 loff_t offset = iocb->ki_pos;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003964 struct cifs_aio_ctx *ctx;
3965
Long Li6e6e2b82018-10-31 22:13:09 +00003966 /*
3967 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
3968 * fall back to data copy read path
3969 * this could be improved by getting pages directly in ITER_KVEC
3970 */
David Howells66294002019-11-21 08:13:58 +00003971 if (direct && iov_iter_is_kvec(to)) {
Long Li6e6e2b82018-10-31 22:13:09 +00003972 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3973 direct = false;
3974 }
3975
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003976 len = iov_iter_count(to);
3977 if (!len)
3978 return 0;
3979
3980 cifs_sb = CIFS_FILE_SB(file);
3981 cfile = file->private_data;
3982 tcon = tlink_tcon(cfile->tlink);
3983
3984 if (!tcon->ses->server->ops->async_readv)
3985 return -ENOSYS;
3986
3987 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3988 cifs_dbg(FYI, "attempting read on write only file instance\n");
3989
3990 ctx = cifs_aio_ctx_alloc();
3991 if (!ctx)
3992 return -ENOMEM;
3993
3994 ctx->cfile = cifsFileInfo_get(cfile);
3995
3996 if (!is_sync_kiocb(iocb))
3997 ctx->iocb = iocb;
3998
David Howells00e23702018-10-22 13:07:28 +01003999 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07004000 ctx->should_dirty = true;
4001
Long Li6e6e2b82018-10-31 22:13:09 +00004002 if (direct) {
4003 ctx->pos = offset;
4004 ctx->direct_io = true;
4005 ctx->iter = *to;
4006 ctx->len = len;
4007 } else {
4008 rc = setup_aio_ctx_iter(ctx, to, READ);
4009 if (rc) {
4010 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4011 return rc;
4012 }
4013 len = ctx->len;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07004014 }
4015
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07004016 /* grab a lock here due to read response handlers can access ctx */
4017 mutex_lock(&ctx->aio_mutex);
4018
4019 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
4020
4021 /* if at least one read request send succeeded, then reset rc */
4022 if (!list_empty(&ctx->list))
4023 rc = 0;
4024
4025 mutex_unlock(&ctx->aio_mutex);
4026
4027 if (rc) {
4028 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4029 return rc;
4030 }
4031
4032 if (!is_sync_kiocb(iocb)) {
4033 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4034 return -EIOCBQUEUED;
4035 }
4036
4037 rc = wait_for_completion_killable(&ctx->done);
4038 if (rc) {
4039 mutex_lock(&ctx->aio_mutex);
4040 ctx->rc = rc = -EINTR;
4041 total_read = ctx->total_len;
4042 mutex_unlock(&ctx->aio_mutex);
4043 } else {
4044 rc = ctx->rc;
4045 total_read = ctx->total_len;
4046 }
4047
4048 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4049
Al Viro0165e812014-02-04 14:19:48 -05004050 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04004051 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05004052 return total_read;
4053 }
4054 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004055}
4056
Long Li6e6e2b82018-10-31 22:13:09 +00004057ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
4058{
4059 return __cifs_readv(iocb, to, true);
4060}
4061
4062ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
4063{
4064 return __cifs_readv(iocb, to, false);
4065}
4066
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004067ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04004068cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004069{
Al Viro496ad9a2013-01-23 17:07:38 -05004070 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004071 struct cifsInodeInfo *cinode = CIFS_I(inode);
4072 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4073 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
4074 iocb->ki_filp->private_data;
4075 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
4076 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004077
4078 /*
4079 * In strict cache mode we need to read from the server all the time
4080 * if we don't have level II oplock because the server can delay mtime
4081 * change - so we can't make a decision about inode invalidating.
4082 * And we can also fail with pagereading if there are mandatory locks
4083 * on pages affected by this read but not on the region from pos to
4084 * pos+len-1.
4085 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004086 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04004087 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004088
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004089 if (cap_unix(tcon->ses) &&
4090 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
4091 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04004092 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004093
4094 /*
4095 * We need to hold the sem to be sure nobody modifies lock list
4096 * with a brlock that prevents reading.
4097 */
4098 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04004099 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004100 tcon->ses->server->vals->shared_lock_type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10004101 0, NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04004102 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004103 up_read(&cinode->lock_sem);
4104 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004105}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004107static ssize_t
4108cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109{
4110 int rc = -EACCES;
4111 unsigned int bytes_read = 0;
4112 unsigned int total_read;
4113 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004114 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004116 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004117 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004118 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004119 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120 struct cifsFileInfo *open_file;
Aurelien Aptel7c065142020-06-04 17:23:55 +02004121 struct cifs_io_parms io_parms = {0};
Steve Frenchec637e32005-12-12 20:53:18 -08004122 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004123 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004125 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04004126 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004128 /* FIXME: set up handlers for larger reads and/or convert to async */
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10004129 rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004130
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304132 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004133 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304134 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07004136 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004137 tcon = tlink_tcon(open_file->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004138 server = cifs_pick_channel(tcon->ses);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004139
4140 if (!server->ops->sync_read) {
4141 free_xid(xid);
4142 return -ENOSYS;
4143 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004145 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4146 pid = open_file->pid;
4147 else
4148 pid = current->tgid;
4149
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05004151 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004153 for (total_read = 0, cur_offset = read_data; read_size > total_read;
4154 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04004155 do {
4156 current_read_size = min_t(uint, read_size - total_read,
4157 rsize);
4158 /*
4159 * For windows me and 9x we do not want to request more
4160 * than it negotiated since it will refuse the read
4161 * then.
4162 */
Steve French9bd21d42020-05-13 10:27:16 -05004163 if (!(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004164 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04004165 current_read_size = min_t(uint,
4166 current_read_size, CIFSMaxBufSize);
4167 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00004168 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04004169 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 if (rc != 0)
4171 break;
4172 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004173 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004174 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004175 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004176 io_parms.length = current_read_size;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004177 io_parms.server = server;
Steve Frenchdb8b6312014-09-22 05:13:55 -05004178 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004179 &bytes_read, &cur_offset,
4180 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04004181 } while (rc == -EAGAIN);
4182
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183 if (rc || (bytes_read == 0)) {
4184 if (total_read) {
4185 break;
4186 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004187 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 return rc;
4189 }
4190 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004191 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004192 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193 }
4194 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004195 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 return total_read;
4197}
4198
Jeff Laytonca83ce32011-04-12 09:13:44 -04004199/*
4200 * If the page is mmap'ed into a process' page tables, then we need to make
4201 * sure that it doesn't change while being written back.
4202 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05304203static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08004204cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004205{
4206 struct page *page = vmf->page;
Shyam Prasad N18d04062021-08-10 10:22:28 +00004207
David Howells70431bf2020-11-17 15:56:59 +00004208#ifdef CONFIG_CIFS_FSCACHE
4209 if (PageFsCache(page) &&
4210 wait_on_page_fscache_killable(page) < 0)
4211 return VM_FAULT_RETRY;
4212#endif
Jeff Laytonca83ce32011-04-12 09:13:44 -04004213
4214 lock_page(page);
4215 return VM_FAULT_LOCKED;
4216}
4217
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004218static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04004219 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07004220 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04004221 .page_mkwrite = cifs_page_mkwrite,
4222};
4223
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004224int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4225{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004226 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05004227 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004228
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004229 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004230
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004231 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004232 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004233 if (!rc)
4234 rc = generic_file_mmap(file, vma);
4235 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004236 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004237
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004238 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004239 return rc;
4240}
4241
Linus Torvalds1da177e2005-04-16 15:20:36 -07004242int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4243{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 int rc, xid;
4245
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004246 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004247
Jeff Laytonabab0952010-02-12 07:44:18 -05004248 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004249 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004250 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4251 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004252 if (!rc)
4253 rc = generic_file_mmap(file, vma);
4254 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004255 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004256
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004257 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258 return rc;
4259}
4260
Jeff Layton0471ca32012-05-16 07:13:16 -04004261static void
4262cifs_readv_complete(struct work_struct *work)
4263{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004264 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04004265 struct cifs_readdata *rdata = container_of(work,
4266 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04004267
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004268 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004269 for (i = 0; i < rdata->nr_pages; i++) {
4270 struct page *page = rdata->pages[i];
4271
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004272 if (rdata->result == 0 ||
4273 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04004274 flush_dcache_page(page);
4275 SetPageUptodate(page);
Shyam Prasad N18d04062021-08-10 10:22:28 +00004276 } else
4277 SetPageError(page);
Jeff Layton0471ca32012-05-16 07:13:16 -04004278
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004279 if (rdata->result == 0 ||
4280 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04004281 cifs_readpage_to_fscache(rdata->mapping->host, page);
4282
David Howells0174ee92022-01-27 16:02:58 +00004283 unlock_page(page);
4284
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004285 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004286
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004287 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004288 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04004289 }
Jeff Layton6993f742012-05-16 07:13:17 -04004290 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04004291}
4292
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004293static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004294readpages_fill_pages(struct TCP_Server_Info *server,
4295 struct cifs_readdata *rdata, struct iov_iter *iter,
4296 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004297{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004298 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004299 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004300 u64 eof;
4301 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004302 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07004303 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004304
4305 /* determine the eof that the server (probably) has */
4306 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004307 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05004308 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004309
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004310 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004311 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004312 for (i = 0; i < nr_pages; i++) {
4313 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07004314 unsigned int to_read = rdata->pagesz;
4315 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004316
Long Li1dbe3462018-05-30 12:47:55 -07004317 if (i == 0)
4318 to_read -= page_offset;
4319 else
4320 page_offset = 0;
4321
4322 n = to_read;
4323
4324 if (len >= to_read) {
4325 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07004326 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004327 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07004328 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05004329 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07004330 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004331 } else if (page->index > eof_index) {
4332 /*
4333 * The VFS will not try to do readahead past the
4334 * i_size, but it's possible that we have outstanding
4335 * writes with gaps in the middle and the i_size hasn't
4336 * caught up yet. Populate those with zeroed out pages
4337 * to prevent the VFS from repeatedly attempting to
4338 * fill them until the writes are flushed.
4339 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004340 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004341 flush_dcache_page(page);
4342 SetPageUptodate(page);
4343 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004344 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004345 rdata->pages[i] = NULL;
4346 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004347 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004348 } else {
4349 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004350 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004351 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004352 rdata->pages[i] = NULL;
4353 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004354 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004355 }
Jeff Layton8321fec2012-09-19 06:22:32 -07004356
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004357 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07004358 result = copy_page_from_iter(
4359 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07004360#ifdef CONFIG_CIFS_SMB_DIRECT
4361 else if (rdata->mr)
4362 result = n;
4363#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004364 else
Long Li1dbe3462018-05-30 12:47:55 -07004365 result = cifs_read_page_from_socket(
4366 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07004367 if (result < 0)
4368 break;
4369
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004370 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004371 }
4372
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004373 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4374 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004375}
4376
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004377static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004378cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4379 struct cifs_readdata *rdata, unsigned int len)
4380{
4381 return readpages_fill_pages(server, rdata, NULL, len);
4382}
4383
4384static int
4385cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4386 struct cifs_readdata *rdata,
4387 struct iov_iter *iter)
4388{
4389 return readpages_fill_pages(server, rdata, iter, iter->count);
4390}
4391
David Howells052e04a2022-01-27 16:02:42 +00004392static void cifs_readahead(struct readahead_control *ractl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393{
Jeff Layton690c5e32011-10-19 15:30:16 -04004394 int rc;
David Howells052e04a2022-01-27 16:02:42 +00004395 struct cifsFileInfo *open_file = ractl->file->private_data;
4396 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004397 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004398 pid_t pid;
David Howells0174ee92022-01-27 16:02:58 +00004399 unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
4400 pgoff_t next_cached = ULONG_MAX;
4401 bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
4402 cifs_inode_cookie(ractl->mapping->host)->cache_priv;
4403 bool check_cache = caching;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404
Steve French0cb012d2018-10-11 01:01:02 -05004405 xid = get_xid();
Suresh Jayaraman566982362010-07-05 18:13:25 +05304406
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4408 pid = open_file->pid;
4409 else
4410 pid = current->tgid;
4411
Jeff Layton690c5e32011-10-19 15:30:16 -04004412 rc = 0;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004413 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414
Joe Perchesf96637b2013-05-04 22:12:25 -05004415 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
David Howells052e04a2022-01-27 16:02:42 +00004416 __func__, ractl->file, ractl->mapping, readahead_count(ractl));
Jeff Layton690c5e32011-10-19 15:30:16 -04004417
4418 /*
David Howells052e04a2022-01-27 16:02:42 +00004419 * Chop the readahead request up into rsize-sized read requests.
Jeff Layton690c5e32011-10-19 15:30:16 -04004420 */
David Howells0174ee92022-01-27 16:02:58 +00004421 while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
4422 unsigned int i, got, rsize;
David Howells052e04a2022-01-27 16:02:42 +00004423 struct page *page;
Jeff Layton690c5e32011-10-19 15:30:16 -04004424 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004425 struct cifs_credits credits_on_stack;
4426 struct cifs_credits *credits = &credits_on_stack;
David Howells0174ee92022-01-27 16:02:58 +00004427 pgoff_t index = readahead_index(ractl) + last_batch_size;
4428
4429 /*
4430 * Find out if we have anything cached in the range of
4431 * interest, and if so, where the next chunk of cached data is.
4432 */
4433 if (caching) {
4434 if (check_cache) {
4435 rc = cifs_fscache_query_occupancy(
4436 ractl->mapping->host, index, nr_pages,
4437 &next_cached, &cache_nr_pages);
4438 if (rc < 0)
4439 caching = false;
4440 check_cache = false;
4441 }
4442
4443 if (index == next_cached) {
4444 /*
4445 * TODO: Send a whole batch of pages to be read
4446 * by the cache.
4447 */
4448 page = readahead_page(ractl);
David Howells46f5cbd2022-01-31 17:54:43 +00004449 last_batch_size = 1 << thp_order(page);
David Howells0174ee92022-01-27 16:02:58 +00004450 if (cifs_readpage_from_fscache(ractl->mapping->host,
4451 page) < 0) {
4452 /*
4453 * TODO: Deal with cache read failure
4454 * here, but for the moment, delegate
4455 * that to readpage.
4456 */
4457 caching = false;
4458 }
4459 unlock_page(page);
4460 next_cached++;
4461 cache_nr_pages--;
4462 if (cache_nr_pages == 0)
4463 check_cache = true;
4464 continue;
4465 }
4466 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004468 if (open_file->invalidHandle) {
4469 rc = cifs_reopen_file(open_file, true);
David Howells052e04a2022-01-27 16:02:42 +00004470 if (rc) {
4471 if (rc == -EAGAIN)
4472 continue;
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004473 break;
David Howells052e04a2022-01-27 16:02:42 +00004474 }
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004475 }
4476
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10004477 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004478 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004479 if (rc)
4480 break;
David Howells052e04a2022-01-27 16:02:42 +00004481 nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
David Howells0174ee92022-01-27 16:02:58 +00004482 nr_pages = min_t(size_t, nr_pages, next_cached - index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483
Jeff Layton690c5e32011-10-19 15:30:16 -04004484 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004485 * Give up immediately if rsize is too small to read an entire
4486 * page. The VFS will fall back to readpage. We should never
4487 * reach this point however since we set ra_pages to 0 when the
4488 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04004489 */
David Howells052e04a2022-01-27 16:02:42 +00004490 if (unlikely(!nr_pages)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004491 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04004492 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04004493 }
4494
Jeff Layton0471ca32012-05-16 07:13:16 -04004495 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04004496 if (!rdata) {
4497 /* best to give up if we're out of mem */
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004498 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04004499 break;
4500 }
4501
David Howells052e04a2022-01-27 16:02:42 +00004502 got = __readahead_batch(ractl, rdata->pages, nr_pages);
4503 if (got != nr_pages) {
4504 pr_warn("__readahead_batch() returned %u/%u\n",
4505 got, nr_pages);
4506 nr_pages = got;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004507 }
Jeff Layton690c5e32011-10-19 15:30:16 -04004508
David Howells052e04a2022-01-27 16:02:42 +00004509 rdata->nr_pages = nr_pages;
4510 rdata->bytes = readahead_batch_length(ractl);
4511 rdata->cfile = cifsFileInfo_get(open_file);
4512 rdata->server = server;
4513 rdata->mapping = ractl->mapping;
4514 rdata->offset = readahead_pos(ractl);
4515 rdata->pid = pid;
4516 rdata->pagesz = PAGE_SIZE;
4517 rdata->tailsz = PAGE_SIZE;
4518 rdata->read_into_pages = cifs_readpages_read_into_pages;
4519 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
4520 rdata->credits = credits_on_stack;
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004521
David Howells052e04a2022-01-27 16:02:42 +00004522 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004523 if (!rc) {
4524 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004525 rc = -EAGAIN;
4526 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004527 rc = server->ops->async_readv(rdata);
4528 }
4529
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004530 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004531 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004532 for (i = 0; i < rdata->nr_pages; i++) {
4533 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04004534 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004535 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04004537 /* Fallback to the readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04004538 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004539 break;
4540 }
Jeff Layton6993f742012-05-16 07:13:17 -04004541
4542 kref_put(&rdata->refcount, cifs_readdata_release);
David Howells052e04a2022-01-27 16:02:42 +00004543 last_batch_size = nr_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 }
4545
Steve French0cb012d2018-10-11 01:01:02 -05004546 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547}
4548
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01004549/*
4550 * cifs_readpage_worker must be called with the page pinned
4551 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552static int cifs_readpage_worker(struct file *file, struct page *page,
4553 loff_t *poffset)
4554{
4555 char *read_data;
4556 int rc;
4557
Suresh Jayaraman566982362010-07-05 18:13:25 +05304558 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05004559 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304560 if (rc == 0)
4561 goto read_complete;
4562
Linus Torvalds1da177e2005-04-16 15:20:36 -07004563 read_data = kmap(page);
4564 /* for reads over a certain size could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004565
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004566 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004567
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568 if (rc < 0)
4569 goto io_error;
4570 else
Joe Perchesf96637b2013-05-04 22:12:25 -05004571 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004572
Steve French9b9c5be2018-09-22 12:07:06 -05004573 /* we do not want atime to be less than mtime, it broke some apps */
4574 file_inode(file)->i_atime = current_time(file_inode(file));
4575 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4576 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4577 else
4578 file_inode(file)->i_atime = current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004579
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004580 if (PAGE_SIZE > rc)
4581 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582
4583 flush_dcache_page(page);
4584 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304585
4586 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05004587 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304588
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004590
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004592 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004593 unlock_page(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304594
4595read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 return rc;
4597}
4598
4599static int cifs_readpage(struct file *file, struct page *page)
4600{
Steve Frenchf2a26a32021-07-23 18:35:15 -05004601 loff_t offset = page_file_offset(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004602 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004603 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004605 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606
4607 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304608 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004609 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304610 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004611 }
4612
Joe Perchesf96637b2013-05-04 22:12:25 -05004613 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00004614 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615
4616 rc = cifs_readpage_worker(file, page, &offset);
4617
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004618 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004619 return rc;
4620}
4621
Steve Frencha403a0a2007-07-26 15:54:16 +00004622static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4623{
4624 struct cifsFileInfo *open_file;
4625
Dave Wysochanskicb248812019-10-03 15:16:27 +10004626 spin_lock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004627 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004628 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Dave Wysochanskicb248812019-10-03 15:16:27 +10004629 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004630 return 1;
4631 }
4632 }
Dave Wysochanskicb248812019-10-03 15:16:27 +10004633 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004634 return 0;
4635}
4636
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637/* We do not want to update the file size from server for inodes
4638 open for write - to avoid races with writepage extending
4639 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004640 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641 but this is tricky to do without racing with writebehind
4642 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004643bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644{
Steve Frencha403a0a2007-07-26 15:54:16 +00004645 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004646 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004647
Steve Frencha403a0a2007-07-26 15:54:16 +00004648 if (is_inode_writable(cifsInode)) {
4649 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004650 struct cifs_sb_info *cifs_sb;
4651
Steve Frenchc32a0b62006-01-12 14:41:28 -08004652 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004653 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004654 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004655 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004656 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004657 }
4658
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004659 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004660 return true;
Steve French7ba526312007-02-08 18:14:13 +00004661
Steve French4b18f2a2008-04-29 00:06:05 +00004662 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004663 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004664 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665}
4666
Nick Piggind9414772008-09-24 11:32:59 -04004667static int cifs_write_begin(struct file *file, struct address_space *mapping,
4668 loff_t pos, unsigned len, unsigned flags,
4669 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670{
Sachin Prabhu466bd312013-09-13 14:11:57 +01004671 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004672 pgoff_t index = pos >> PAGE_SHIFT;
4673 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004674 loff_t page_start = pos & PAGE_MASK;
4675 loff_t i_size;
4676 struct page *page;
4677 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004678
Joe Perchesf96637b2013-05-04 22:12:25 -05004679 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04004680
Sachin Prabhu466bd312013-09-13 14:11:57 +01004681start:
Nick Piggin54566b22009-01-04 12:00:53 -08004682 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004683 if (!page) {
4684 rc = -ENOMEM;
4685 goto out;
4686 }
Nick Piggind9414772008-09-24 11:32:59 -04004687
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004688 if (PageUptodate(page))
4689 goto out;
Steve French8a236262007-03-06 00:31:00 +00004690
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004691 /*
4692 * If we write a full page it will be up to date, no need to read from
4693 * the server. If the write is short, we'll end up doing a sync write
4694 * instead.
4695 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004696 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004697 goto out;
4698
4699 /*
4700 * optimize away the read when we have an oplock, and we're not
4701 * expecting to use any of the data we'd be reading in. That
4702 * is, when the page lies beyond the EOF, or straddles the EOF
4703 * and the write will cover all of the existing data.
4704 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004705 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004706 i_size = i_size_read(mapping->host);
4707 if (page_start >= i_size ||
4708 (offset == 0 && (pos + len) >= i_size)) {
4709 zero_user_segments(page, 0, offset,
4710 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004711 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004712 /*
4713 * PageChecked means that the parts of the page
4714 * to which we're not writing are considered up
4715 * to date. Once the data is copied to the
4716 * page, it can be set uptodate.
4717 */
4718 SetPageChecked(page);
4719 goto out;
4720 }
4721 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722
Sachin Prabhu466bd312013-09-13 14:11:57 +01004723 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004724 /*
4725 * might as well read a page, it is fast enough. If we get
4726 * an error, we don't need to return it. cifs_write_end will
4727 * do a sync write instead since PG_uptodate isn't set.
4728 */
4729 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004730 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004731 oncethru = 1;
4732 goto start;
Steve French8a236262007-03-06 00:31:00 +00004733 } else {
4734 /* we could try using another file handle if there is one -
4735 but how would we lock it to prevent close of that handle
4736 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04004737 this will be written out by write_end so is fine */
Steve French8a236262007-03-06 00:31:00 +00004738 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004739out:
4740 *pagep = page;
4741 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742}
4743
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304744static int cifs_release_page(struct page *page, gfp_t gfp)
4745{
4746 if (PagePrivate(page))
4747 return 0;
David Howells70431bf2020-11-17 15:56:59 +00004748 if (PageFsCache(page)) {
4749 if (current_is_kswapd() || !(gfp & __GFP_FS))
4750 return false;
4751 wait_on_page_fscache(page);
4752 }
4753 fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
4754 return true;
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304755}
4756
Lukas Czernerd47992f2013-05-21 23:17:23 -04004757static void cifs_invalidate_page(struct page *page, unsigned int offset,
4758 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304759{
David Howells70431bf2020-11-17 15:56:59 +00004760 wait_on_page_fscache(page);
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304761}
4762
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004763static int cifs_launder_page(struct page *page)
4764{
4765 int rc = 0;
4766 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004767 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004768 struct writeback_control wbc = {
4769 .sync_mode = WB_SYNC_ALL,
4770 .nr_to_write = 0,
4771 .range_start = range_start,
4772 .range_end = range_end,
4773 };
4774
Joe Perchesf96637b2013-05-04 22:12:25 -05004775 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004776
4777 if (clear_page_dirty_for_io(page))
4778 rc = cifs_writepage_locked(page, &wbc);
4779
David Howells70431bf2020-11-17 15:56:59 +00004780 wait_on_page_fscache(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004781 return rc;
4782}
4783
Tejun Heo9b646972010-07-20 22:09:02 +02004784void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04004785{
4786 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4787 oplock_break);
David Howells2b0143b2015-03-17 22:25:59 +00004788 struct inode *inode = d_inode(cfile->dentry);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004789 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004790 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004791 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004792 int rc = 0;
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004793 bool purge_cache = false;
Rohith Surabattulac3f207a2021-04-13 00:26:42 -05004794 bool is_deferred = false;
4795 struct cifs_deferred_close *dclose;
Jeff Layton3bc303c2009-09-21 06:47:50 -04004796
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004797 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
NeilBrown74316202014-07-07 15:16:04 +10004798 TASK_UNINTERRUPTIBLE);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004799
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004800 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
4801 cfile->oplock_epoch, &purge_cache);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004802
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004803 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004804 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05004805 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4806 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004807 cinode->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004808 }
4809
Jeff Layton3bc303c2009-09-21 06:47:50 -04004810 if (inode && S_ISREG(inode->i_mode)) {
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004811 if (CIFS_CACHE_READ(cinode))
Al Viro8737c932009-12-24 06:47:55 -05004812 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00004813 else
Al Viro8737c932009-12-24 06:47:55 -05004814 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004815 rc = filemap_fdatawrite(inode->i_mapping);
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004816 if (!CIFS_CACHE_READ(cinode) || purge_cache) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004817 rc = filemap_fdatawait(inode->i_mapping);
4818 mapping_set_error(inode->i_mapping, rc);
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004819 cifs_zap_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004820 }
Joe Perchesf96637b2013-05-04 22:12:25 -05004821 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004822 if (CIFS_CACHE_WRITE(cinode))
4823 goto oplock_break_ack;
Jeff Layton3bc303c2009-09-21 06:47:50 -04004824 }
4825
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004826 rc = cifs_push_locks(cfile);
4827 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004828 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004829
Pavel Shilovsky9bd45402019-10-29 16:51:19 -07004830oplock_break_ack:
Jeff Layton3bc303c2009-09-21 06:47:50 -04004831 /*
Rohith Surabattula9e992752021-08-09 09:32:46 +00004832 * When oplock break is received and there are no active
4833 * file handles but cached, then schedule deferred close immediately.
4834 * So, new open will not use cached handle.
4835 */
4836 spin_lock(&CIFS_I(inode)->deferred_lock);
4837 is_deferred = cifs_is_deferred_close(cfile, &dclose);
4838 spin_unlock(&CIFS_I(inode)->deferred_lock);
4839 if (is_deferred &&
4840 cfile->deferred_close_scheduled &&
4841 delayed_work_pending(&cfile->deferred)) {
4842 if (cancel_delayed_work(&cfile->deferred)) {
4843 _cifsFileInfo_put(cfile, false, false);
4844 goto oplock_break_done;
4845 }
4846 }
4847 /*
Jeff Layton3bc303c2009-09-21 06:47:50 -04004848 * releasing stale oplock after recent reconnect of smb session using
4849 * a now incorrect file handle is not a data integrity issue but do
4850 * not bother sending an oplock release if session to server still is
4851 * disconnected since oplock already released by the server
4852 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00004853 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004854 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4855 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05004856 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004857 }
Rohith Surabattula9e992752021-08-09 09:32:46 +00004858oplock_break_done:
Ronnie Sahlberg32546a92019-11-03 13:06:37 +10004859 _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004860 cifs_done_oplock_break(cinode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004861}
4862
Steve Frenchdca69282013-11-11 16:42:37 -06004863/*
4864 * The presence of cifs_direct_io() in the address space ops vector
4865 * allowes open() O_DIRECT flags which would have failed otherwise.
4866 *
4867 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4868 * so this method should never be called.
4869 *
David Howells052e04a2022-01-27 16:02:42 +00004870 * Direct IO is not yet supported in the cached mode.
Steve Frenchdca69282013-11-11 16:42:37 -06004871 */
4872static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004873cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004874{
4875 /*
4876 * FIXME
4877 * Eventually need to support direct IO for non forcedirectio mounts
4878 */
4879 return -EINVAL;
4880}
4881
Steve French4e8aea32020-04-09 21:42:18 -05004882static int cifs_swap_activate(struct swap_info_struct *sis,
4883 struct file *swap_file, sector_t *span)
4884{
4885 struct cifsFileInfo *cfile = swap_file->private_data;
4886 struct inode *inode = swap_file->f_mapping->host;
4887 unsigned long blocks;
4888 long long isize;
4889
4890 cifs_dbg(FYI, "swap activate\n");
4891
4892 spin_lock(&inode->i_lock);
4893 blocks = inode->i_blocks;
4894 isize = inode->i_size;
4895 spin_unlock(&inode->i_lock);
4896 if (blocks*512 < isize) {
4897 pr_warn("swap activate: swapfile has holes\n");
4898 return -EINVAL;
4899 }
4900 *span = sis->pages;
4901
Joe Perchesa0a30362020-04-14 22:42:53 -07004902 pr_warn_once("Swap support over SMB3 is experimental\n");
Steve French4e8aea32020-04-09 21:42:18 -05004903
4904 /*
4905 * TODO: consider adding ACL (or documenting how) to prevent other
4906 * users (on this or other systems) from reading it
4907 */
4908
4909
4910 /* TODO: add sk_set_memalloc(inet) or similar */
4911
4912 if (cfile)
4913 cfile->swapfile = true;
4914 /*
4915 * TODO: Since file already open, we can't open with DENY_ALL here
4916 * but we could add call to grab a byte range lock to prevent others
4917 * from reading or writing the file
4918 */
4919
4920 return 0;
4921}
4922
4923static void cifs_swap_deactivate(struct file *file)
4924{
4925 struct cifsFileInfo *cfile = file->private_data;
4926
4927 cifs_dbg(FYI, "swap deactivate\n");
4928
4929 /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
4930
4931 if (cfile)
4932 cfile->swapfile = false;
4933
4934 /* do we need to unpin (or unlock) the file */
4935}
Steve Frenchdca69282013-11-11 16:42:37 -06004936
David Howells70431bf2020-11-17 15:56:59 +00004937/*
4938 * Mark a page as having been made dirty and thus needing writeback. We also
4939 * need to pin the cache object to write back to.
4940 */
4941#ifdef CONFIG_CIFS_FSCACHE
4942static int cifs_set_page_dirty(struct page *page)
4943{
4944 return fscache_set_page_dirty(page, cifs_inode_cookie(page->mapping->host));
4945}
4946#else
4947#define cifs_set_page_dirty __set_page_dirty_nobuffers
4948#endif
4949
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004950const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004951 .readpage = cifs_readpage,
David Howells052e04a2022-01-27 16:02:42 +00004952 .readahead = cifs_readahead,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07004954 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004955 .write_begin = cifs_write_begin,
4956 .write_end = cifs_write_end,
David Howells70431bf2020-11-17 15:56:59 +00004957 .set_page_dirty = cifs_set_page_dirty,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304958 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06004959 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304960 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004961 .launder_page = cifs_launder_page,
Steve French4e8aea32020-04-09 21:42:18 -05004962 /*
4963 * TODO: investigate and if useful we could add an cifs_migratePage
4964 * helper (under an CONFIG_MIGRATION) in the future, and also
4965 * investigate and add an is_dirty_writeback helper if needed
4966 */
4967 .swap_activate = cifs_swap_activate,
4968 .swap_deactivate = cifs_swap_deactivate,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004970
4971/*
4972 * cifs_readpages requires the server to support a buffer large enough to
4973 * contain the header plus one complete page of data. Otherwise, we need
4974 * to leave cifs_readpages out of the address space operations.
4975 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004976const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004977 .readpage = cifs_readpage,
4978 .writepage = cifs_writepage,
4979 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004980 .write_begin = cifs_write_begin,
4981 .write_end = cifs_write_end,
David Howells70431bf2020-11-17 15:56:59 +00004982 .set_page_dirty = cifs_set_page_dirty,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304983 .releasepage = cifs_release_page,
4984 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004985 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004986};