// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"

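/*
 * Convert POSIX open flags (f_flags) to the NT-style desired-access bits
 * carried in an SMB open request.
 */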
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request; it can
		 * cause an unnecessary access-denied error on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

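/*
 * Convert POSIX open flags (f_flags) to the SMB_O_* flags used by the
 * legacy CIFS POSIX extensions open call.
 */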
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

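/*
 * Map the O_CREAT/O_EXCL/O_TRUNC combination in the open flags to the
 * corresponding SMB create disposition.
 */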
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

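/*
 * Open a file via the CIFS POSIX extensions and, when the caller asks for
 * it, instantiate or update the inode from the returned
 * FILE_UNIX_BASIC_INFO.
 */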
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

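/*
 * Open a file with a regular (non-POSIX) SMB create call, returning the
 * handle through @fid and refreshing the inode metadata from the server.
 */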
static int
cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists).
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}

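/*
 * Return true if any open instance of this inode holds cached byte-range
 * locks.
 */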
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);

struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* readable file instances go first in the list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

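/* Take an additional reference on an open file's private data. */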
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

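/*
 * Final teardown for a cifsFileInfo: free any cached lock records and
 * drop the tlink, dentry and superblock references.
 */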
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	cifs_fscache_release_inode_cookie(inode);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from the oplock break handler
 * @offload: if true, defer the final release to the fileinfo_put_wq
 *	workqueue rather than performing it synchronously
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close_getattr)
			server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	if (offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

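/*
 * VFS ->open() for cifs: reuse a cached deferred-close handle when its
 * flags match, otherwise open on the server (trying the POSIX path first
 * when available) and attach a new cifsFileInfo to the struct file.
 */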
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto out;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

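/*
 * Reopen a file whose handle has been invalidated (for example after a
 * reconnect), optionally flushing dirty pages and refreshing the inode
 * once the new handle is obtained.
 */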
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem, can end up causing writepage to
	 * get called and if the server was down that means we end up here,
	 * and we can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions)
			rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
		else if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to the server already and could
	 * deadlock if we tried to flush data. Since we do not know if we
	 * have data that would invalidate the current end of file on the
	 * server, we can not go to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

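/*
 * Delayed work that completes a close deferred by cifs_close(): remove
 * the deferred-close record and drop the final reference on the handle.
 */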
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

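/*
 * VFS ->release() for cifs files. If the inode holds a read-write-handle
 * lease the close is deferred (for up to acregmax) so the handle can be
 * reused by a subsequent open; otherwise the reference is dropped now.
 */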
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
		    cinode->lease_granted &&
		    !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
		    dclose) {
			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
				inode->i_ctime = inode->i_mtime = current_time(inode);
				cifs_fscache_update_inode_cookie(inode);
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->acregmax))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->acregmax);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

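/*
 * After a reconnect on a tcon with persistent handles, reopen every file
 * handle that was invalidated.
 */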
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

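/*
 * VFS ->release() for directories: close the server search handle and
 * free any buffered search results.
 */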
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

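/* Allocate and initialize a byte-range lock record for the current task. */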
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

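/* Unlink and wake up every task blocked waiting on @lock. */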
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
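/*
 * Scan one file handle's cached lock list for a lock that overlaps
 * [offset, offset + length) and conflicts with the requested @type;
 * a conflicting lock is returned via @conf_lock.
 */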
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

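/*
 * Check all open handles of the inode for a conflicting byte-range lock.
 */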
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001097/*
1098 * Check if there is another lock that prevents us to set the lock (mandatory
1099 * style). If such a lock exists, update the flock structure with its
1100 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1101 * or leave it the same if we can't. Returns 0 if we don't need to request to
1102 * the server or 1 otherwise.
1103 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->fl_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request the server;
 * 2) 1, if no locks prevent us but we need to request the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
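
/*
 * Note on the blocking path above: the wait_event_interruptible()
 * condition is an open-coded "list_empty(&lock->blist)" on the waiter's
 * own entry. cifs_del_lock_waiters() performs the matching
 * list_del_init() and wake_up() when the conflicting lock is removed,
 * after which the waiter retries the conflict scan from try_again.
 */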

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to request the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) <0, if an error occurred while setting the lock;
 * 2) 0, if we set the lock and don't need to request the server;
 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
 * 4) FILE_LOCK_DEFERRED + 1, if we need to request the server.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}

int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
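
/*
 * Batching example (illustrative arithmetic): the usable payload is
 * roughly min(maxBuf - sizeof(struct smb_hdr), PAGE_SIZE), and max_num
 * is that payload divided by sizeof(LOCKING_ANDX_RANGE) - typically a
 * few hundred ranges per request. A file with 1000 cached exclusive
 * locks is therefore pushed in ceil(1000 / max_num) LOCKING_ANDX calls
 * on the first pass; shared locks need a second pass because each
 * request carries a single lock-type field.
 */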

static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
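
/*
 * The XOR with the randomly generated cifs_lock_secret is presumably
 * there so that fl_owner_t (a kernel pointer) is never sent to the
 * server as-is: the hash yields a stable 32-bit lock-owner id per owner
 * without leaking kernel addresses onto the wire.
 */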

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->fl_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
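
/*
 * The function above uses a count-then-fill pattern: FL_POSIX locks are
 * counted under flc_lock, the lock_to_push structures are allocated
 * with GFP_KERNEL outside of it (no sleeping allocations under a
 * spinlock), and the list is filled on a second walk. The count cannot
 * grow in between because the caller holds cinode->lock_sem, as the
 * comment inside notes.
 */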

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
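
/*
 * Example mapping (illustrative): a blocking F_WRLCK request arrives
 * with FL_SLEEP set, so *wait_flag becomes true and *type ends up as
 * large_lock_type | exclusive_lock_type with *lock = 1; an F_UNLCK
 * request selects unlock_lock_type and sets *unlock = 1 instead. The
 * concrete bit values come from server->vals and differ by dialect.
 */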
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001503static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001504cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001505 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001506{
1507 int rc = 0;
1508 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001509 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1510 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001511 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001512 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001514 if (posix_lck) {
1515 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001516
1517 rc = cifs_posix_lock_test(file, flock);
1518 if (!rc)
1519 return rc;
1520
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001521 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001522 posix_lock_type = CIFS_RDLCK;
1523 else
1524 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001525 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1526 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001527 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001528 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 return rc;
1530 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001531
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001532 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001533 if (!rc)
1534 return rc;
1535
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001536 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001537 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1538 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001539 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001540 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1541 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001542 flock->fl_type = F_UNLCK;
1543 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001544 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1545 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001546 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001547 }
1548
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001549 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001550 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001551 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001552 }
1553
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001554 type &= ~server->vals->exclusive_lock_type;
1555
1556 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1557 type | server->vals->shared_lock_type,
1558 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001559 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001560 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1561 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001562 flock->fl_type = F_RDLCK;
1563 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001564 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1565 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001566 } else
1567 flock->fl_type = F_WRLCK;
1568
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001569 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001570}
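
/*
 * The mandatory-locking branch above emulates F_GETLK, since there is
 * no true "test lock" operation on the wire: take the requested lock
 * and, if that succeeds, release it at once and report F_UNLCK; if the
 * exclusive attempt fails, retry the probe as shared to decide whether
 * F_RDLCK or F_WRLCK is reported back in @flock.
 */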

void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;

	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;

	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
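
/*
 * Rollback sketch for the unlock path above: ranges are staged on
 * tmp_llist while a batch is in flight. On failure, cifs_move_llist()
 * splices them back onto cfile->llist->locks so the cached state keeps
 * matching the server; on success, cifs_free_llist() frees them and
 * wakes any blocked waiters via cifs_del_lock_waiters().
 */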

static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->fl_flags);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * page reading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
		    CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}

int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	if (!(fl->fl_flags & FL_FLOCK)) {
		free_xid(xid);
		return -ENOLCK;
	}

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);
	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

/*
 * Update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held.
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
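
/*
 * server_eof tracks the file size as the server last confirmed it,
 * which may differ from the locally cached i_size. Callers take
 * inode->i_lock around this helper (see cifs_write() below) so the
 * compare-and-update pair is not racy.
 */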

static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms = {0};

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/*
				 * We could deadlock if we called
				 * filemap_fdatawait from here, so tell
				 * reopen_file not to flush data to the
				 * server now.
				 */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written,
						     iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size) {
			i_size_write(d_inode(dentry), *offset);
			d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
		}
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
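
/*
 * Retry behaviour sketch for the loop above: a dropped session
 * typically surfaces as -EAGAIN from sync_write, the handle is then
 * reopened without flushing (to avoid deadlocking in
 * filemap_fdatawait), and the same chunk is resent from the unchanged
 * *offset. Partial progress is preserved: once some bytes are written,
 * a later error ends the loop and the byte count, not the error, is
 * returned.
 */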

struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/*
	 * We could simply get the first list entry since write-only entries
	 * are always at the end of the list, but since the first entry might
	 * have a close pending, we go through the whole list.
	 */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}

/* Return -EBADF if no handle is found and general rc otherwise */
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;

	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen, but we had reports of an oops
	 * (due to it being zero) during stress testcases, so we need to check
	 * for it.
	 */
	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
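
/*
 * Reopen sketch for the search above: a handle that is merely invalid
 * (e.g. after a reconnect) is remembered in inv_file and reopened once
 * the spinlock is dropped. A failed reopen moves the handle to the list
 * tail and restarts the scan, bounded by MAX_REOPEN_ATT attempts, so a
 * persistently broken handle cannot loop forever.
 */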

struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
{
	struct cifsFileInfo *cfile;
	int rc;

	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
	if (rc)
		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);

	return cfile;
}
2134
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002135int
2136cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002137 int flags,
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002138 struct cifsFileInfo **ret_file)
2139{
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002140 struct cifsFileInfo *cfile;
Al Virof6a9bc32021-03-05 17:36:04 -05002141 void *page = alloc_dentry_path();
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002142
2143 *ret_file = NULL;
2144
2145 spin_lock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002146 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2147 struct cifsInodeInfo *cinode;
2148 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2149 if (IS_ERR(full_path)) {
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002150 spin_unlock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002151 free_dentry_path(page);
2152 return PTR_ERR(full_path);
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002153 }
Al Virof6a9bc32021-03-05 17:36:04 -05002154 if (strcmp(full_path, name))
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002155 continue;
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002156
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002157 cinode = CIFS_I(d_inode(cfile->dentry));
2158 spin_unlock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002159 free_dentry_path(page);
Aurelien Aptel86f740f2020-02-21 11:19:06 +01002160 return cifs_get_writable_file(cinode, flags, ret_file);
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002161 }
2162
2163 spin_unlock(&tcon->open_file_lock);
Al Virof6a9bc32021-03-05 17:36:04 -05002164 free_dentry_path(page);
Ronnie Sahlberg8de9e862019-08-30 08:25:46 +10002165 return -ENOENT;
2166}

int
cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		const char *full_path = build_path_from_dentry(cfile->dentry, page);
		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		*ret_file = find_readable_file(cinode, 0);
		return *ret_file ? 0 : -ENOENT;
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
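
/*
 * Usage sketch (hypothetical caller, for illustration): the two path-based
 * lookups above let code that only has a tree-relative name reuse an
 * already-open handle instead of opening a new one:
 *
 *	struct cifsFileInfo *cfile = NULL;
 *
 *	if (cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile) == 0) {
 *		... use cfile->fid for the request ...
 *		cifsFileInfo_put(cfile);
 *	}
 */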

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
				    &open_file);
	if (!rc) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
		else
			rc = -EFAULT;
	} else {
		cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
		if (!is_retryable_error(rc))
			rc = -EIO;
	}

	kunmap(page);
	return rc;
}

static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	*found_pages = find_get_pages_range_tag(mapping, index, end,
				PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
	return wdata;
}
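
/*
 * Note on the helper above: find_get_pages_range_tag() takes up to @tofind
 * referenced, dirty pages starting at *index and advances *index past the
 * last page it returned, so repeated calls walk the mapping without
 * rescanning already-visited pages.
 */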

static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */

		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
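
/*
 * Contract of wdata_prepare_pages(), summarized: on return, the first
 * nr_pages entries of wdata->pages are locked with writeback set and form
 * one contiguous run of page indices; every page past that run has been
 * released. The caller therefore only ever sends a single contiguous range
 * per wdata.
 */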

static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
	wdata->pid = wdata->cfile->pid;

	rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
	if (rc)
		return rc;

	if (wdata->cfile->invalidHandle)
		rc = -EAGAIN;
	else
		rc = wdata->server->ops->async_writev(wdata,
						      cifs_writedata_release);

	return rc;
}
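
/*
 * Credit flow, in short: the caller reserves send credits via
 * wait_mtu_credits() before gathering pages, adjust_credits() above trims
 * the reservation to the bytes actually being sent, and every failure path
 * must hand the remainder back through add_credits_and_wake_if() (see
 * cifs_writepages() below).
 */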

static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct cifsFileInfo *cfile = NULL;
	int rc = 0;
	int saved_rc = 0;
	unsigned int xid;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->ctx->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	xid = get_xid();
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);

retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize;
		pgoff_t next = 0, tofind, saved_index = index;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;
		int get_file_rc = 0;

		if (cfile)
			cifsFileInfo_put(cfile);

		rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);

		/* in case of an error store it to return later */
		if (rc)
			get_file_rc = rc;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
						   &wsize, credits);
		if (rc != 0) {
			done = true;
			break;
		}

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
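		/*
		 * Worked example (assuming 4KB pages): with a granted wsize
		 * of 64KB, wsize / PAGE_SIZE is 16, so tofind caps at 16
		 * pages per wdata unless fewer pages remain before @end.
		 */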

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			done = true;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits_on_stack;
		wdata->cfile = cfile;
		wdata->server = server;
		cfile = NULL;

		if (!wdata->cfile) {
			cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
				 get_file_rc);
			if (is_retryable_error(get_file_rc))
				rc = get_file_rc;
			else
				rc = -EBADF;
		} else
			rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (is_retryable_error(rc))
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (!is_retryable_error(rc))
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		/* Return immediately if we received a signal during writing */
		if (is_interrupt_error(rc)) {
			done = true;
			break;
		}

		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (saved_rc != 0)
		rc = saved_rc;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	if (cfile)
		cifsFileInfo_put(cfile);
	free_xid(xid);
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
	return rc;
}

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size) {
			i_size_write(inode, pos);
			inode->i_blocks = (512 - 1 + pos) >> 9;
		}
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);

	return rc;
}
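
/*
 * Note on the i_blocks update in cifs_write_end(): i_blocks counts 512-byte
 * sectors, so (512 - 1 + pos) >> 9 rounds the new size up to whole sectors;
 * e.g. a 1-byte file accounts for one sector, a 513-byte file for two.
 */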

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(inode->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto strict_fsync_exit;
		}

		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

strict_fsync_exit:
	free_xid(xid);
	return rc;
}
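
/*
 * cifs_strict_fsync() differs from cifs_fsync() below only in its cache
 * handling: when the client holds no read caching (oplock/lease), it zaps
 * the page cache so a later read fetches fresh data from the server.
 */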

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush == NULL) {
			rc = -ENOSYS;
			goto fsync_exit;
		}

		if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
			smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
			if (smbfile) {
				rc = server->ops->flush(xid, tcon, &smbfile->fid);
				cifsFileInfo_put(smbfile);
			} else
				cifs_dbg(FYI, "ignore fsync for file not open for write\n");
		} else
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
	}

fsync_exit:
	free_xid(xid);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
	if (rc)
		trace_cifs_flush_err(inode->i_ino, rc);
	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
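
/*
 * Worked example (assuming 4KB pages): for wsize = 64KB and len = 10000,
 * clen becomes 10000 and get_numpages() returns DIV_ROUND_UP(10000, 4096),
 * i.e. 3 pages, with *cur_len set to 10000.
 */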

static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}

static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
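
/*
 * Continuing the get_numpages() example above: copying those 10000 bytes
 * fills the three pages with 4096, 4096 and 1808 bytes, so the caller in
 * cifs_write_from_iter() later sets the tail size to
 * cur_len - (nr_pages - 1) * PAGE_SIZE = 1808.
 */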

static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
	struct cifs_aio_ctx *ctx)
{
	unsigned int wsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server = wdata->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/*
		 * Wait for credits to resend this wdata.
		 * Note: we are attempting to resend the whole wdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, wdata->bytes,
						&wsize, &credits);
			if (rc)
				goto fail;

			if (wsize < wdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (wsize < wdata->bytes);
		wdata->credits = credits;

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else {
#ifdef CONFIG_CIFS_SMB_DIRECT
				if (wdata->mr) {
					wdata->mr->need_invalidate = true;
					smbd_deregister_mr(wdata->mr);
					wdata->mr = NULL;
				}
#endif
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
			}
		}

		/* If the write was successfully sent, we are done */
		if (!rc) {
			list_add_tail(&wdata->list, wdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &wdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	return rc;
}

static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	unsigned int xid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	xid = get_xid();

	do {
		unsigned int wsize;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;

		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
						   &wsize, credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, wsize);

		if (ctx->direct_io) {
			ssize_t result;

			result = iov_iter_get_pages_alloc(
				from, &pagevec, cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					 "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
					 result, iov_iter_type(from),
					 from->iov_offset, from->count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(from, cur_len);

			nr_pages =
				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;

			wdata = cifs_writedata_direct_alloc(pagevec,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			wdata->page_offset = start;
			wdata->tailsz =
				nr_pages > 1 ?
					cur_len - (PAGE_SIZE - start) -
					(nr_pages - 2) * PAGE_SIZE :
					cur_len;
		} else {
			nr_pages = get_numpages(wsize, len, &cur_len);
			wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
			if (rc) {
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			num_pages = nr_pages;
			rc = wdata_fill_from_iovec(
				wdata, from, &cur_len, &num_pages);
			if (rc) {
				for (i = 0; i < nr_pages; i++)
					put_page(wdata->pages[i]);
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			/*
			 * Bring nr_pages down to the number of pages we
			 * actually used, and free any pages that we didn't use.
			 */
			for ( ; nr_pages > num_pages; nr_pages--)
				put_page(wdata->pages[nr_pages - 1]);

			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->server = server;
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->credits = credits_on_stack;
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		if (rc) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	free_xid(xid);
	return rc;
}

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	ssize_t rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				if (ctx->direct_io)
					rc = cifs_resend_wdata(
						wdata, &tmp_list, ctx);
				else {
					iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

					rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);

					kref_put(&wdata->refcount,
						cifs_uncached_writedata_release);
				}

				list_splice(&tmp_list, &ctx->list);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	cifs_stats_bytes_written(tcon, ctx->total_len);
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
	else
		complete(&ctx->done);
}

static ssize_t __cifs_writev(
	struct kiocb *iocb, struct iov_iter *from, bool direct)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	size_t len = iov_iter_count(from);
	int rc;

	/*
	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
	 * In this case, fall back to the non-direct write function.
	 * This could be improved by getting pages directly in ITER_KVEC.
	 */
	if (direct && iov_iter_is_kvec(from)) {
		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
		direct = false;
	}

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	if (direct) {
		ctx->direct_io = true;
		ctx->iter = *from;
		ctx->len = len;
	} else {
		rc = setup_aio_ctx_iter(ctx, from, WRITE);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
	}

	/* grab a lock here since read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}

ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, true);
}

ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}
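
/*
 * Entry-point summary (descriptive only): cifs_direct_writev() is wired up
 * for direct I/O style mounts and sends the user's pages to the server
 * without copying, while cifs_user_writev() copies into kernel pages first;
 * both funnel through __cifs_writev() and the async write machinery above.
 */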

static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
3364
3365ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04003366cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003367{
Al Viro496ad9a2013-01-23 17:07:38 -05003368 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003369 struct cifsInodeInfo *cinode = CIFS_I(inode);
3370 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3371 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3372 iocb->ki_filp->private_data;
3373 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003374 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003375
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003376 written = cifs_get_writer(cinode);
3377 if (written)
3378 return written;
3379
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003380 if (CIFS_CACHE_WRITE(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003381 if (cap_unix(tcon->ses) &&
3382 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003383 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
Al Viro3dae8752014-04-03 12:05:17 -04003384 written = generic_file_write_iter(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003385 goto out;
3386 }
Al Viro3dae8752014-04-03 12:05:17 -04003387 written = cifs_writev(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003388 goto out;
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04003389 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04003390 /*
3391 * For non-oplocked files in strict cache mode we need to write the data
3392 * to the server exactly from the pos to pos+len-1 rather than flush all
3393 * affected pages because it may cause a error with mandatory locks on
3394 * these pages but not on the region from pos to ppos+len-1.
3395 */
Al Viro3dae8752014-04-03 12:05:17 -04003396 written = cifs_user_writev(iocb, from);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003397 if (CIFS_CACHE_READ(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003398 /*
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003399 * We have read level caching and we have just sent a write
3400 * request to the server thus making data in the cache stale.
3401 * Zap the cache and set oplock/lease level to NONE to avoid
3402 * reading stale data from the cache. All subsequent read
3403 * operations will read new data from the server.
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003404 */
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003405 cifs_zap_mapping(inode);
Pavel Shilovsky6dfbd842019-03-04 17:48:01 -08003406 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05003407 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003408 cinode->oplock = 0;
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003409 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003410out:
3411 cifs_put_writer(cinode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04003412 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05003413}
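/*
 * Editorial sketch, not part of the original file: the dispatch that
 * cifs_strict_writev() performs above, with the kernel state collapsed
 * into plain booleans. It shows only the decision order; the real
 * function also takes the writer reference and, after an uncached
 * write, zaps stale cached data when read caching is held.
 */
enum strict_write_path {
	WRITE_VIA_PAGECACHE,	/* generic_file_write_iter() */
	WRITE_VIA_CIFS_WRITEV,	/* cifs_writev(): checks brlock conflicts */
	WRITE_UNCACHED		/* cifs_user_writev(): straight to the server */
};

static enum strict_write_path
pick_strict_write_path(bool cache_write, bool unix_fcntl_cap, bool posix_brl)
{
	if (cache_write) {
		/* writeback caching held: the page cache is authoritative */
		if (unix_fcntl_cap && posix_brl)
			return WRITE_VIA_PAGECACHE;
		return WRITE_VIA_CIFS_WRITEV;
	}
	/* no write oplock/lease: send the exact byte range to the server */
	return WRITE_UNCACHED;
}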
3414
Jeff Layton0471ca32012-05-16 07:13:16 -04003415static struct cifs_readdata *
Long Lif9f5aca2018-05-30 12:47:54 -07003416cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04003417{
3418 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003419
Long Lif9f5aca2018-05-30 12:47:54 -07003420 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04003421 if (rdata != NULL) {
Long Lif9f5aca2018-05-30 12:47:54 -07003422 rdata->pages = pages;
Jeff Layton6993f742012-05-16 07:13:17 -04003423 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003424 INIT_LIST_HEAD(&rdata->list);
3425 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04003426 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04003427 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07003428
Jeff Layton0471ca32012-05-16 07:13:16 -04003429 return rdata;
3430}
3431
Long Lif9f5aca2018-05-30 12:47:54 -07003432static struct cifs_readdata *
3433cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3434{
3435 struct page **pages =
Kees Cook6396bb22018-06-12 14:03:40 -07003436 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
Long Lif9f5aca2018-05-30 12:47:54 -07003437 struct cifs_readdata *ret = NULL;
3438
3439 if (pages) {
3440 ret = cifs_readdata_direct_alloc(pages, complete);
3441 if (!ret)
3442 kfree(pages);
3443 }
3444
3445 return ret;
3446}
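/*
 * Editorial usage sketch, not part of the original file: the lifetime
 * pairing for a cifs_readdata. kref_init() in the allocator starts the
 * count at 1, and the holder of the last reference frees both the page
 * array and the structure itself through cifs_readdata_release().
 */
static int readdata_lifetime_example(unsigned int nr_pages)
{
	struct cifs_readdata *rdata;

	rdata = cifs_readdata_alloc(nr_pages, cifs_uncached_readv_complete);
	if (!rdata)
		return -ENOMEM;

	/* ... fill in rdata and submit; the work func runs on the reply ... */

	kref_put(&rdata->refcount, cifs_readdata_release);
	return 0;
}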
3447
Jeff Layton6993f742012-05-16 07:13:17 -04003448void
3449cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04003450{
Jeff Layton6993f742012-05-16 07:13:17 -04003451 struct cifs_readdata *rdata = container_of(refcount,
3452 struct cifs_readdata, refcount);
Long Libd3dcc62017-11-22 17:38:47 -07003453#ifdef CONFIG_CIFS_SMB_DIRECT
3454 if (rdata->mr) {
3455 smbd_deregister_mr(rdata->mr);
3456 rdata->mr = NULL;
3457 }
3458#endif
Jeff Layton6993f742012-05-16 07:13:17 -04003459 if (rdata->cfile)
3460 cifsFileInfo_put(rdata->cfile);
3461
Long Lif9f5aca2018-05-30 12:47:54 -07003462 kvfree(rdata->pages);
Jeff Layton0471ca32012-05-16 07:13:16 -04003463 kfree(rdata);
3464}
3465
Jeff Layton2a1bb132012-05-16 07:13:17 -04003466static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003467cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04003468{
3469 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003470 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04003471 unsigned int i;
3472
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003473 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04003474 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3475 if (!page) {
3476 rc = -ENOMEM;
3477 break;
3478 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003479 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04003480 }
3481
3482 if (rc) {
Roberto Bergantinos Corpas31fad7d2019-05-28 09:38:14 +02003483 unsigned int nr_page_failed = i;
3484
3485 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003486 put_page(rdata->pages[i]);
3487 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04003488 }
3489 }
3490 return rc;
3491}
3492
3493static void
3494cifs_uncached_readdata_release(struct kref *refcount)
3495{
Jeff Layton1c892542012-05-16 07:13:17 -04003496 struct cifs_readdata *rdata = container_of(refcount,
3497 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003498 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003499
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003500 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003501 for (i = 0; i < rdata->nr_pages; i++) {
3502 put_page(rdata->pages[i]);
Jeff Layton1c892542012-05-16 07:13:17 -04003503 }
3504 cifs_readdata_release(refcount);
3505}
3506
Jeff Layton1c892542012-05-16 07:13:17 -04003507/**
3508 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3509 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05003510 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04003511 *
3512 * This function copies data from a list of pages in a readdata response into
3513 * an array of iovecs. It will first calculate where the data should go
3514 * based on the info in the readdata and then copy the data into that spot.
3515 */
Al Viro7f25bba2014-02-04 14:07:43 -05003516static int
3517cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04003518{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04003519 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003520 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003521
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003522 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003523 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02003524 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003525 size_t written;
3526
David Howells00e23702018-10-22 13:07:28 +01003527 if (unlikely(iov_iter_is_pipe(iter))) {
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003528 void *addr = kmap_atomic(page);
3529
3530 written = copy_to_iter(addr, copy, iter);
3531 kunmap_atomic(addr);
3532 } else
3533 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05003534 remaining -= written;
3535 if (written < copy && iov_iter_count(iter) > 0)
3536 break;
Jeff Layton1c892542012-05-16 07:13:17 -04003537 }
Al Viro7f25bba2014-02-04 14:07:43 -05003538 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04003539}
3540
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003541static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3542
Jeff Layton1c892542012-05-16 07:13:17 -04003543static void
3544cifs_uncached_readv_complete(struct work_struct *work)
3545{
3546 struct cifs_readdata *rdata = container_of(work,
3547 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04003548
3549 complete(&rdata->done);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003550 collect_uncached_read_data(rdata->ctx);
3551 /* the call below can possibly free the last ref to the aio ctx */
Jeff Layton1c892542012-05-16 07:13:17 -04003552 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3553}
3554
3555static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003556uncached_fill_pages(struct TCP_Server_Info *server,
3557 struct cifs_readdata *rdata, struct iov_iter *iter,
3558 unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04003559{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003560 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003561 unsigned int i;
3562 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003563 unsigned int page_offset = rdata->page_offset;
Jeff Layton1c892542012-05-16 07:13:17 -04003564
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003565 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07003566 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003567 for (i = 0; i < nr_pages; i++) {
3568 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05003569 size_t n;
Long Li1dbe3462018-05-30 12:47:55 -07003570 unsigned int segment_size = rdata->pagesz;
3571
3572 if (i == 0)
3573 segment_size -= page_offset;
3574 else
3575 page_offset = 0;
3576
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003577
Al Viro71335662016-01-09 19:54:50 -05003578 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04003579 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003580 rdata->pages[i] = NULL;
3581 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04003582 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07003583 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003584 }
Long Li1dbe3462018-05-30 12:47:55 -07003585
Al Viro71335662016-01-09 19:54:50 -05003586 n = len;
Long Li1dbe3462018-05-30 12:47:55 -07003587 if (len >= segment_size)
Al Viro71335662016-01-09 19:54:50 -05003588 /* enough data to fill the page */
Long Li1dbe3462018-05-30 12:47:55 -07003589 n = segment_size;
3590 else
Al Viro71335662016-01-09 19:54:50 -05003591 rdata->tailsz = len;
Long Li1dbe3462018-05-30 12:47:55 -07003592 len -= n;
3593
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003594 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003595 result = copy_page_from_iter(
3596 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003597#ifdef CONFIG_CIFS_SMB_DIRECT
3598 else if (rdata->mr)
3599 result = n;
3600#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003601 else
Long Li1dbe3462018-05-30 12:47:55 -07003602 result = cifs_read_page_from_socket(
3603 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003604 if (result < 0)
3605 break;
3606
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003607 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003608 }
3609
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003610 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3611 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003612}
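/*
 * Editorial sketch, not part of the original file: the per-page
 * accounting that uncached_fill_pages() does above, reduced to plain
 * integers. Only the first page may start at a non-zero offset, every
 * full page consumes pagesz bytes, and a final short page becomes the
 * tail (tailsz).
 */
static size_t split_into_pages(size_t len, size_t pagesz,
			       size_t first_page_offset, size_t *tailsz)
{
	size_t nr = 0, segment;

	*tailsz = pagesz;
	while (len > 0) {
		segment = pagesz - (nr == 0 ? first_page_offset : 0);
		if (len < segment)
			*tailsz = segment = len;	/* short tail page */
		len -= segment;
		nr++;
	}
	return nr;	/* number of pages that receive data */
}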
3613
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003614static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003615cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3616 struct cifs_readdata *rdata, unsigned int len)
3617{
3618 return uncached_fill_pages(server, rdata, NULL, len);
3619}
3620
3621static int
3622cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3623 struct cifs_readdata *rdata,
3624 struct iov_iter *iter)
3625{
3626 return uncached_fill_pages(server, rdata, iter, iter->count);
3627}
3628
Long Li6e6e2b82018-10-31 22:13:09 +00003629static int cifs_resend_rdata(struct cifs_readdata *rdata,
3630 struct list_head *rdata_list,
3631 struct cifs_aio_ctx *ctx)
3632{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003633 unsigned int rsize;
3634 struct cifs_credits credits;
Long Li6e6e2b82018-10-31 22:13:09 +00003635 int rc;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003636 struct TCP_Server_Info *server;
3637
3638 /* XXX: should we pick a new channel here? */
3639 server = rdata->server;
Long Li6e6e2b82018-10-31 22:13:09 +00003640
Long Li6e6e2b82018-10-31 22:13:09 +00003641 do {
Long Li0b0dfd52019-03-15 07:55:00 +00003642 if (rdata->cfile->invalidHandle) {
3643 rc = cifs_reopen_file(rdata->cfile, true);
3644 if (rc == -EAGAIN)
3645 continue;
3646 else if (rc)
3647 break;
3648 }
3649
3650 /*
3651 * Wait for credits to resend this rdata.
3652 * Note: we are attempting to resend the whole rdata rather
3653 * than in segments.
3654 */
3655 do {
3656 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
Long Li6e6e2b82018-10-31 22:13:09 +00003657 &rsize, &credits);
3658
Long Li0b0dfd52019-03-15 07:55:00 +00003659 if (rc)
3660 goto fail;
Long Li6e6e2b82018-10-31 22:13:09 +00003661
Long Li0b0dfd52019-03-15 07:55:00 +00003662 if (rsize < rdata->bytes) {
3663 add_credits_and_wake_if(server, &credits, 0);
3664 msleep(1000);
3665 }
3666 } while (rsize < rdata->bytes);
3667 rdata->credits = credits;
3668
3669 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3670 if (!rc) {
3671 if (rdata->cfile->invalidHandle)
3672 rc = -EAGAIN;
Long Lib7a55bb2019-10-15 22:54:50 +00003673 else {
3674#ifdef CONFIG_CIFS_SMB_DIRECT
3675 if (rdata->mr) {
3676 rdata->mr->need_invalidate = true;
3677 smbd_deregister_mr(rdata->mr);
3678 rdata->mr = NULL;
3679 }
3680#endif
Long Li0b0dfd52019-03-15 07:55:00 +00003681 rc = server->ops->async_readv(rdata);
Long Lib7a55bb2019-10-15 22:54:50 +00003682 }
Long Li6e6e2b82018-10-31 22:13:09 +00003683 }
Long Li6e6e2b82018-10-31 22:13:09 +00003684
Long Li0b0dfd52019-03-15 07:55:00 +00003685 /* If the read was successfully sent, we are done */
3686 if (!rc) {
3687 /* Add to aio pending list */
3688 list_add_tail(&rdata->list, rdata_list);
3689 return 0;
3690 }
Long Li6e6e2b82018-10-31 22:13:09 +00003691
Long Li0b0dfd52019-03-15 07:55:00 +00003692 /* Roll back credits and retry if needed */
3693 add_credits_and_wake_if(server, &rdata->credits, 0);
3694 } while (rc == -EAGAIN);
Long Li6e6e2b82018-10-31 22:13:09 +00003695
Long Li0b0dfd52019-03-15 07:55:00 +00003696fail:
3697 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Long Li6e6e2b82018-10-31 22:13:09 +00003698 return rc;
3699}
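/*
 * Editorial sketch, not part of the original file: the credit-throttling
 * pattern used by cifs_resend_rdata() above. Wait until the server has
 * granted enough MTU credits to cover the whole request; on a partial
 * grant, return the credits, back off, and try again.
 */
static int wait_for_full_grant(struct TCP_Server_Info *server,
			       unsigned int need, struct cifs_credits *credits)
{
	unsigned int granted;
	int rc;

	do {
		rc = server->ops->wait_mtu_credits(server, need, &granted,
						   credits);
		if (rc)
			return rc;
		if (granted < need) {
			/* partial grant: give it back and let others run */
			add_credits_and_wake_if(server, credits, 0);
			msleep(1000);
		}
	} while (granted < need);
	return 0;
}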
3700
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003701static int
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003702cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003703 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3704 struct cifs_aio_ctx *ctx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003706 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003707 unsigned int npages, rsize;
3708 struct cifs_credits credits_on_stack;
3709 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003710 size_t cur_len;
3711 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003712 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003713 struct TCP_Server_Info *server;
Long Li6e6e2b82018-10-31 22:13:09 +00003714 struct page **pagevec;
3715 size_t start;
3716 struct iov_iter direct_iov = ctx->iter;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003717
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003718 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07003719
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003720 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3721 pid = open_file->pid;
3722 else
3723 pid = current->tgid;
3724
Long Li6e6e2b82018-10-31 22:13:09 +00003725 if (ctx->direct_io)
3726 iov_iter_advance(&direct_iov, offset - ctx->pos);
3727
Jeff Layton1c892542012-05-16 07:13:17 -04003728 do {
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003729 if (open_file->invalidHandle) {
3730 rc = cifs_reopen_file(open_file, true);
3731 if (rc == -EAGAIN)
3732 continue;
3733 else if (rc)
3734 break;
3735 }
3736
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10003737 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003738 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003739 if (rc)
3740 break;
3741
3742 cur_len = min_t(const size_t, len, rsize);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003743
Long Li6e6e2b82018-10-31 22:13:09 +00003744 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05003745 ssize_t result;
Long Li6e6e2b82018-10-31 22:13:09 +00003746
Steve Frenchb98e26d2018-11-01 10:54:32 -05003747 result = iov_iter_get_pages_alloc(
Long Li6e6e2b82018-10-31 22:13:09 +00003748 &direct_iov, &pagevec,
3749 cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05003750 if (result < 0) {
Long Li6e6e2b82018-10-31 22:13:09 +00003751 cifs_dbg(VFS,
Joe Perchesa0a30362020-04-14 22:42:53 -07003752 "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
3753 result, iov_iter_type(&direct_iov),
3754 direct_iov.iov_offset,
3755 direct_iov.count);
Long Li6e6e2b82018-10-31 22:13:09 +00003756 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00003757
3758 rc = result;
3759 add_credits_and_wake_if(server, credits, 0);
Long Li6e6e2b82018-10-31 22:13:09 +00003760 break;
3761 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05003762 cur_len = (size_t)result;
Long Li6e6e2b82018-10-31 22:13:09 +00003763 iov_iter_advance(&direct_iov, cur_len);
3764
3765 rdata = cifs_readdata_direct_alloc(
3766 pagevec, cifs_uncached_readv_complete);
3767 if (!rdata) {
3768 add_credits_and_wake_if(server, credits, 0);
3769 rc = -ENOMEM;
3770 break;
3771 }
3772
3773 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
3774 rdata->page_offset = start;
3775 rdata->tailsz = npages > 1 ?
3776 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
3777 cur_len;
3778
3779 } else {
3780
3781 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3782 /* allocate a readdata struct */
3783 rdata = cifs_readdata_alloc(npages,
Jeff Layton1c892542012-05-16 07:13:17 -04003784 cifs_uncached_readv_complete);
Long Li6e6e2b82018-10-31 22:13:09 +00003785 if (!rdata) {
3786 add_credits_and_wake_if(server, credits, 0);
3787 rc = -ENOMEM;
3788 break;
3789 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003790
Long Li6e6e2b82018-10-31 22:13:09 +00003791 rc = cifs_read_allocate_pages(rdata, npages);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08003792 if (rc) {
3793 kvfree(rdata->pages);
3794 kfree(rdata);
3795 add_credits_and_wake_if(server, credits, 0);
3796 break;
3797 }
Long Li6e6e2b82018-10-31 22:13:09 +00003798
3799 rdata->tailsz = PAGE_SIZE;
3800 }
Jeff Layton1c892542012-05-16 07:13:17 -04003801
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003802 rdata->server = server;
Jeff Layton1c892542012-05-16 07:13:17 -04003803 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003804 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003805 rdata->offset = offset;
3806 rdata->bytes = cur_len;
3807 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003808 rdata->pagesz = PAGE_SIZE;
3809 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003810 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003811 rdata->credits = credits_on_stack;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003812 rdata->ctx = ctx;
3813 kref_get(&ctx->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003814
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003815 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3816
3817 if (!rc) {
3818 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003819 rc = -EAGAIN;
3820 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003821 rc = server->ops->async_readv(rdata);
3822 }
3823
Jeff Layton1c892542012-05-16 07:13:17 -04003824 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003825 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003826 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003827 cifs_uncached_readdata_release);
3828 if (rc == -EAGAIN) {
3829 iov_iter_revert(&direct_iov, cur_len);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003830 continue;
Long Li6e6e2b82018-10-31 22:13:09 +00003831 }
Jeff Layton1c892542012-05-16 07:13:17 -04003832 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833 }
Jeff Layton1c892542012-05-16 07:13:17 -04003834
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003835 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003836 offset += cur_len;
3837 len -= cur_len;
3838 } while (len > 0);
3839
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003840 return rc;
3841}
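/*
 * Editorial worked example, not part of the original file, for the
 * direct-I/O page geometry above. With PAGE_SIZE = 4096, a user buffer
 * whose first pinned page starts at start = 1000, and cur_len = 10000:
 *
 *   npages = (10000 + 1000 + 4095) / 4096 = 3
 *   tailsz = 10000 - (4096 - 1000) - (3 - 2) * 4096 = 2808
 *
 * i.e. page 0 carries 3096 bytes, page 1 a full 4096, and the tail
 * page the remaining 2808 (3096 + 4096 + 2808 = 10000).
 */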
3842
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003843static void
3844collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003845{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003846 struct cifs_readdata *rdata, *tmp;
3847 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003848 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003849 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003850
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003851 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003852
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003853 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003854
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003855 if (list_empty(&ctx->list)) {
3856 mutex_unlock(&ctx->aio_mutex);
3857 return;
3858 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003859
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003860 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003861 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003862again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003863 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003864 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003865 if (!try_wait_for_completion(&rdata->done)) {
3866 mutex_unlock(&ctx->aio_mutex);
3867 return;
3868 }
3869
3870 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003871 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003872 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003873 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003874
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003875 list_del_init(&rdata->list);
3876 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003877
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003878 /*
3879 * Got part of the data and then a reconnect
3880 * happened -- fill the buffer and continue
3881 * reading.
3882 */
3883 if (got_bytes && got_bytes < rdata->bytes) {
Long Li6e6e2b82018-10-31 22:13:09 +00003884 rc = 0;
3885 if (!ctx->direct_io)
3886 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003887 if (rc) {
3888 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003889 cifs_uncached_readdata_release);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003890 continue;
3891 }
3892 }
3893
Long Li6e6e2b82018-10-31 22:13:09 +00003894 if (ctx->direct_io) {
3895 /*
3896 * Re-use rdata as this is a
3897 * direct I/O.
3898 */
3899 rc = cifs_resend_rdata(
3900 rdata,
3901 &tmp_list, ctx);
3902 } else {
3903 rc = cifs_send_async_read(
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003904 rdata->offset + got_bytes,
3905 rdata->bytes - got_bytes,
3906 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003907 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003908
Long Li6e6e2b82018-10-31 22:13:09 +00003909 kref_put(&rdata->refcount,
3910 cifs_uncached_readdata_release);
3911 }
3912
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003913 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003914
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003915 goto again;
3916 } else if (rdata->result)
3917 rc = rdata->result;
Long Li6e6e2b82018-10-31 22:13:09 +00003918 else if (!ctx->direct_io)
Jeff Layton1c892542012-05-16 07:13:17 -04003919 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003920
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003921 /* if there was a short read -- discard anything left */
3922 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3923 rc = -ENODATA;
Long Li6e6e2b82018-10-31 22:13:09 +00003924
3925 ctx->total_len += rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003926 }
3927 list_del_init(&rdata->list);
3928 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003929 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003930
Jérôme Glisse13f59382019-04-10 15:37:47 -04003931 if (!ctx->direct_io)
Long Li6e6e2b82018-10-31 22:13:09 +00003932 ctx->total_len = ctx->len - iov_iter_count(to);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003933
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003934 /* mask nodata case */
3935 if (rc == -ENODATA)
3936 rc = 0;
3937
Yilu Lin97adda82020-03-18 11:59:19 +08003938 ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003939
3940 mutex_unlock(&ctx->aio_mutex);
3941
3942 if (ctx->iocb && ctx->iocb->ki_complete)
Jens Axboe6b19b762021-10-21 09:22:35 -06003943 ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003944 else
3945 complete(&ctx->done);
3946}
3947
Long Li6e6e2b82018-10-31 22:13:09 +00003948static ssize_t __cifs_readv(
3949 struct kiocb *iocb, struct iov_iter *to, bool direct)
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003950{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003951 size_t len;
Long Li6e6e2b82018-10-31 22:13:09 +00003952 struct file *file = iocb->ki_filp;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003953 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003954 struct cifsFileInfo *cfile;
Long Li6e6e2b82018-10-31 22:13:09 +00003955 struct cifs_tcon *tcon;
3956 ssize_t rc, total_read = 0;
3957 loff_t offset = iocb->ki_pos;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003958 struct cifs_aio_ctx *ctx;
3959
Long Li6e6e2b82018-10-31 22:13:09 +00003960 /*
3961 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC, so
3962 * fall back to the data-copy read path. This could be improved
3963 * by getting the pages directly from an ITER_KVEC iterator.
3964 */
David Howells66294002019-11-21 08:13:58 +00003965 if (direct && iov_iter_is_kvec(to)) {
Long Li6e6e2b82018-10-31 22:13:09 +00003966 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3967 direct = false;
3968 }
3969
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003970 len = iov_iter_count(to);
3971 if (!len)
3972 return 0;
3973
3974 cifs_sb = CIFS_FILE_SB(file);
3975 cfile = file->private_data;
3976 tcon = tlink_tcon(cfile->tlink);
3977
3978 if (!tcon->ses->server->ops->async_readv)
3979 return -ENOSYS;
3980
3981 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3982 cifs_dbg(FYI, "attempting read on write only file instance\n");
3983
3984 ctx = cifs_aio_ctx_alloc();
3985 if (!ctx)
3986 return -ENOMEM;
3987
3988 ctx->cfile = cifsFileInfo_get(cfile);
3989
3990 if (!is_sync_kiocb(iocb))
3991 ctx->iocb = iocb;
3992
David Howells00e23702018-10-22 13:07:28 +01003993 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003994 ctx->should_dirty = true;
3995
Long Li6e6e2b82018-10-31 22:13:09 +00003996 if (direct) {
3997 ctx->pos = offset;
3998 ctx->direct_io = true;
3999 ctx->iter = *to;
4000 ctx->len = len;
4001 } else {
4002 rc = setup_aio_ctx_iter(ctx, to, READ);
4003 if (rc) {
4004 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4005 return rc;
4006 }
4007 len = ctx->len;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07004008 }
4009
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07004010 /* grab a lock here due to read response handlers can access ctx */
4011 mutex_lock(&ctx->aio_mutex);
4012
4013 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
4014
4015 /* if at least one read request send succeeded, then reset rc */
4016 if (!list_empty(&ctx->list))
4017 rc = 0;
4018
4019 mutex_unlock(&ctx->aio_mutex);
4020
4021 if (rc) {
4022 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4023 return rc;
4024 }
4025
4026 if (!is_sync_kiocb(iocb)) {
4027 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4028 return -EIOCBQUEUED;
4029 }
4030
4031 rc = wait_for_completion_killable(&ctx->done);
4032 if (rc) {
4033 mutex_lock(&ctx->aio_mutex);
4034 ctx->rc = rc = -EINTR;
4035 total_read = ctx->total_len;
4036 mutex_unlock(&ctx->aio_mutex);
4037 } else {
4038 rc = ctx->rc;
4039 total_read = ctx->total_len;
4040 }
4041
4042 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4043
Al Viro0165e812014-02-04 14:19:48 -05004044 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04004045 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05004046 return total_read;
4047 }
4048 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004049}
4050
Long Li6e6e2b82018-10-31 22:13:09 +00004051ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
4052{
4053 return __cifs_readv(iocb, to, true);
4054}
4055
4056ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
4057{
4058 return __cifs_readv(iocb, to, false);
4059}
4060
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004061ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04004062cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004063{
Al Viro496ad9a2013-01-23 17:07:38 -05004064 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004065 struct cifsInodeInfo *cinode = CIFS_I(inode);
4066 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4067 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
4068 iocb->ki_filp->private_data;
4069 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
4070 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004071
4072 /*
4073 * In strict cache mode we need to read from the server all the time
4074 * if we don't have a level II oplock because the server can delay mtime
4075 * change - so we can't make a decision about invalidating the inode.
4076 * We can also fail with page reading if there are mandatory locks
4077 * on pages affected by this read but not on the region from pos to
4078 * pos+len-1.
4079 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004080 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04004081 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004082
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004083 if (cap_unix(tcon->ses) &&
4084 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
4085 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04004086 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004087
4088 /*
4089 * We need to hold the sem to be sure nobody modifies the lock list
4090 * with a brlock that prevents reading.
4091 */
4092 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04004093 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004094 tcon->ses->server->vals->shared_lock_type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10004095 0, NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04004096 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004097 up_read(&cinode->lock_sem);
4098 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004099}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004101static ssize_t
4102cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103{
4104 int rc = -EACCES;
4105 unsigned int bytes_read = 0;
4106 unsigned int total_read;
4107 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004108 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004110 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004111 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004112 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004113 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114 struct cifsFileInfo *open_file;
Aurelien Aptel7c065142020-06-04 17:23:55 +02004115 struct cifs_io_parms io_parms = {0};
Steve Frenchec637e32005-12-12 20:53:18 -08004116 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004117 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004119 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04004120 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004122 /* FIXME: set up handlers for larger reads and/or convert to async */
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10004123 rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004124
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304126 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004127 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304128 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07004130 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004131 tcon = tlink_tcon(open_file->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004132 server = cifs_pick_channel(tcon->ses);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004133
4134 if (!server->ops->sync_read) {
4135 free_xid(xid);
4136 return -ENOSYS;
4137 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004139 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4140 pid = open_file->pid;
4141 else
4142 pid = current->tgid;
4143
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05004145 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004147 for (total_read = 0, cur_offset = read_data; read_size > total_read;
4148 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04004149 do {
4150 current_read_size = min_t(uint, read_size - total_read,
4151 rsize);
4152 /*
4153 * For Windows ME and 9x we do not want to request more
4154 * than it negotiated since it would then refuse the
4155 * read.
4156 */
Steve French9bd21d42020-05-13 10:27:16 -05004157 if (!(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004158 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04004159 current_read_size = min_t(uint,
4160 current_read_size, CIFSMaxBufSize);
4161 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00004162 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04004163 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 if (rc != 0)
4165 break;
4166 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004167 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004168 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004169 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004170 io_parms.length = current_read_size;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004171 io_parms.server = server;
Steve Frenchdb8b6312014-09-22 05:13:55 -05004172 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004173 &bytes_read, &cur_offset,
4174 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04004175 } while (rc == -EAGAIN);
4176
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177 if (rc || (bytes_read == 0)) {
4178 if (total_read) {
4179 break;
4180 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004181 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182 return rc;
4183 }
4184 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004185 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004186 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187 }
4188 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004189 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004190 return total_read;
4191}
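/*
 * Editorial sketch, not part of the original file: the control flow of
 * cifs_read() above with the transport collapsed into a hypothetical
 * do_one_read() helper. Each request is clamped to rsize, -EAGAIN
 * retries the same chunk (the handle was reopened), success advances
 * the offset, and the loop stops at EOF or on an error with nothing
 * read yet.
 */
static ssize_t sync_read_loop(char *buf, size_t size, loff_t *pos,
			      size_t rsize)
{
	size_t total = 0;
	ssize_t n = 0;

	while (total < size) {
		/*
		 * do_one_read() is hypothetical, standing in for
		 * server->ops->sync_read()
		 */
		n = do_one_read(buf + total,
				min_t(size_t, size - total, rsize), *pos);
		if (n == -EAGAIN)
			continue;	/* reopened handle; retry this chunk */
		if (n <= 0)
			break;
		total += n;
		*pos += n;
	}
	return total ? total : n;
}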
4192
Jeff Laytonca83ce32011-04-12 09:13:44 -04004193/*
4194 * If the page is mmap'ed into a process' page tables, then we need to make
4195 * sure that it doesn't change while being written back.
4196 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05304197static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08004198cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004199{
4200 struct page *page = vmf->page;
Shyam Prasad N18d04062021-08-10 10:22:28 +00004201 struct file *file = vmf->vma->vm_file;
4202 struct inode *inode = file_inode(file);
4203
4204 cifs_fscache_wait_on_page_write(inode, page);
Jeff Laytonca83ce32011-04-12 09:13:44 -04004205
4206 lock_page(page);
4207 return VM_FAULT_LOCKED;
4208}
4209
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004210static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04004211 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07004212 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04004213 .page_mkwrite = cifs_page_mkwrite,
4214};
4215
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004216int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4217{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004218 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05004219 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004220
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004221 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004222
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004223 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004224 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004225 if (!rc)
4226 rc = generic_file_mmap(file, vma);
4227 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004228 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004229
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004230 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004231 return rc;
4232}
4233
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4235{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236 int rc, xid;
4237
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004238 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004239
Jeff Laytonabab0952010-02-12 07:44:18 -05004240 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004241 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004242 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4243 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004244 if (!rc)
4245 rc = generic_file_mmap(file, vma);
4246 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004247 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004248
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004249 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250 return rc;
4251}
4252
Jeff Layton0471ca32012-05-16 07:13:16 -04004253static void
4254cifs_readv_complete(struct work_struct *work)
4255{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004256 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04004257 struct cifs_readdata *rdata = container_of(work,
4258 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04004259
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004260 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004261 for (i = 0; i < rdata->nr_pages; i++) {
4262 struct page *page = rdata->pages[i];
4263
Johannes Weiner6058eae2020-06-03 16:02:40 -07004264 lru_cache_add(page);
Jeff Layton0471ca32012-05-16 07:13:16 -04004265
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004266 if (rdata->result == 0 ||
4267 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04004268 flush_dcache_page(page);
4269 SetPageUptodate(page);
Shyam Prasad N18d04062021-08-10 10:22:28 +00004270 } else
4271 SetPageError(page);
Jeff Layton0471ca32012-05-16 07:13:16 -04004272
4273 unlock_page(page);
4274
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004275 if (rdata->result == 0 ||
4276 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04004277 cifs_readpage_to_fscache(rdata->mapping->host, page);
Shyam Prasad N18d04062021-08-10 10:22:28 +00004278 else
4279 cifs_fscache_uncache_page(rdata->mapping->host, page);
Jeff Layton0471ca32012-05-16 07:13:16 -04004280
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004281 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004282
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004283 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004284 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04004285 }
Jeff Layton6993f742012-05-16 07:13:17 -04004286 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04004287}
4288
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004289static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004290readpages_fill_pages(struct TCP_Server_Info *server,
4291 struct cifs_readdata *rdata, struct iov_iter *iter,
4292 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004293{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004294 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004295 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004296 u64 eof;
4297 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004298 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07004299 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004300
4301 /* determine the eof that the server (probably) has */
4302 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004303 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05004304 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004305
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004306 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004307 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004308 for (i = 0; i < nr_pages; i++) {
4309 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07004310 unsigned int to_read = rdata->pagesz;
4311 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004312
Long Li1dbe3462018-05-30 12:47:55 -07004313 if (i == 0)
4314 to_read -= page_offset;
4315 else
4316 page_offset = 0;
4317
4318 n = to_read;
4319
4320 if (len >= to_read) {
4321 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07004322 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004323 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07004324 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05004325 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07004326 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004327 } else if (page->index > eof_index) {
4328 /*
4329 * The VFS will not try to do readahead past the
4330 * i_size, but it's possible that we have outstanding
4331 * writes with gaps in the middle and the i_size hasn't
4332 * caught up yet. Populate those with zeroed out pages
4333 * to prevent the VFS from repeatedly attempting to
4334 * fill them until the writes are flushed.
4335 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004336 zero_user(page, 0, PAGE_SIZE);
Johannes Weiner6058eae2020-06-03 16:02:40 -07004337 lru_cache_add(page);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004338 flush_dcache_page(page);
4339 SetPageUptodate(page);
4340 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004341 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004342 rdata->pages[i] = NULL;
4343 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004344 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004345 } else {
4346 /* no need to hold page hostage */
Johannes Weiner6058eae2020-06-03 16:02:40 -07004347 lru_cache_add(page);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004348 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004349 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004350 rdata->pages[i] = NULL;
4351 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004352 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004353 }
Jeff Layton8321fec2012-09-19 06:22:32 -07004354
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004355 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07004356 result = copy_page_from_iter(
4357 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07004358#ifdef CONFIG_CIFS_SMB_DIRECT
4359 else if (rdata->mr)
4360 result = n;
4361#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004362 else
Long Li1dbe3462018-05-30 12:47:55 -07004363 result = cifs_read_page_from_socket(
4364 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07004365 if (result < 0)
4366 break;
4367
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004368 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004369 }
4370
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004371 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4372 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004373}
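/*
 * Editorial worked example, not part of the original file, for the
 * eof_index computation above. With PAGE_SIZE = 4096 (PAGE_SHIFT = 12)
 * and a server eof of 10000 bytes:
 *
 *   eof_index = (10000 - 1) >> 12 = 2
 *
 * so page indexes 0..2 can contain data, and any readahead page with
 * index > 2 is zero-filled and marked up to date instead of being read
 * from the server. An eof of 0 maps to eof_index = 0 as a guard.
 */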
4374
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004375static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004376cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4377 struct cifs_readdata *rdata, unsigned int len)
4378{
4379 return readpages_fill_pages(server, rdata, NULL, len);
4380}
4381
4382static int
4383cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4384 struct cifs_readdata *rdata,
4385 struct iov_iter *iter)
4386{
4387 return readpages_fill_pages(server, rdata, iter, iter->count);
4388}
4389
4390static int
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004391readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
4392 unsigned int rsize, struct list_head *tmplist,
4393 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
4394{
4395 struct page *page, *tpage;
4396 unsigned int expected_index;
4397 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07004398 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004399
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004400 INIT_LIST_HEAD(tmplist);
4401
Nikolay Borisovf86196e2019-01-03 15:29:02 -08004402 page = lru_to_page(page_list);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004403
4404 /*
4405 * Lock the page and put it in the cache. Since no one else
4406 * should have access to this page, we're safe to simply set
4407 * PG_locked without checking it first.
4408 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004409 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004410 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07004411 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004412
4413 /* give up if we can't stick it in the cache */
4414 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004415 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004416 return rc;
4417 }
4418
4419 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004420 *offset = (loff_t)page->index << PAGE_SHIFT;
4421 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004422 *nr_pages = 1;
4423 list_move_tail(&page->lru, tmplist);
4424
4425 /* now try and add more pages onto the request */
4426 expected_index = page->index + 1;
4427 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4428 /* discontinuity ? */
4429 if (page->index != expected_index)
4430 break;
4431
4432 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004433 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004434 break;
4435
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004436 __SetPageLocked(page);
Zhang Xiaoxu95a3d8f2020-06-22 05:30:19 -04004437 rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
4438 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004439 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004440 break;
4441 }
4442 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004443 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004444 expected_index++;
4445 (*nr_pages)++;
4446 }
4447 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448}
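/*
 * Editorial sketch, not part of the original file: the batching rule
 * implemented by readpages_get_pages() above, over a plain array of
 * page indexes. Starting from the first page, pages are taken while
 * they remain index-contiguous and the batch still fits in the rsize
 * budget (expressed here in whole pages).
 */
static unsigned int batch_contiguous(const unsigned long *index,
				     unsigned int n, unsigned int rsize_pages)
{
	unsigned int i;

	if (n == 0)
		return 0;
	for (i = 1; i < n && i < rsize_pages; i++) {
		if (index[i] != index[0] + i)	/* discontinuity ends batch */
			break;
	}
	return i;	/* pages in this request */
}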
4449
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450static int cifs_readpages(struct file *file, struct address_space *mapping,
4451 struct list_head *page_list, unsigned num_pages)
4452{
Jeff Layton690c5e32011-10-19 15:30:16 -04004453 int rc;
Zhang Xiaoxu95a3d8f2020-06-22 05:30:19 -04004454 int err = 0;
Jeff Layton690c5e32011-10-19 15:30:16 -04004455 struct list_head tmplist;
4456 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04004457 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004458 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004459 pid_t pid;
Steve French0cb012d2018-10-11 01:01:02 -05004460 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461
Steve French0cb012d2018-10-11 01:01:02 -05004462 xid = get_xid();
Jeff Layton690c5e32011-10-19 15:30:16 -04004463 /*
Suresh Jayaraman566982362010-07-05 18:13:25 +05304464 * Reads as many pages as possible from fscache. Returns -ENOBUFS
4465 * immediately if the cookie is negative.
David Howells54afa992013-09-04 17:10:39 +00004466 *
4467 * After this point, every page in the list might have PG_fscache set,
4468 * so we will need to clean that up from every page we don't use.
Suresh Jayaraman566982362010-07-05 18:13:25 +05304469 */
4470 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4471 &num_pages);
Steve French0cb012d2018-10-11 01:01:02 -05004472 if (rc == 0) {
4473 free_xid(xid);
Jeff Layton690c5e32011-10-19 15:30:16 -04004474 return rc;
Steve French0cb012d2018-10-11 01:01:02 -05004475 }
Suresh Jayaraman566982362010-07-05 18:13:25 +05304476
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004477 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4478 pid = open_file->pid;
4479 else
4480 pid = current->tgid;
4481
Jeff Layton690c5e32011-10-19 15:30:16 -04004482 rc = 0;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004483 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484
Joe Perchesf96637b2013-05-04 22:12:25 -05004485 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4486 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04004487
4488 /*
4489 * Start with the page at the end of the list and move it to a
4490 * private list. Do the same with any following pages until we hit
4491 * the rsize limit, hit an index discontinuity, or run out of
4492 * pages. Issue the async read and then start the loop again
4493 * until the list is empty.
4494 *
4495 * Note that list order is important. The page_list is in
4496 * the order of declining indexes. When we put the pages in
4497 * rdata->pages, we want them in increasing order.
4498 */
Zhang Xiaoxu95a3d8f2020-06-22 05:30:19 -04004499 while (!list_empty(page_list) && !err) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004500 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04004501 loff_t offset;
4502 struct page *page, *tpage;
4503 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004504 struct cifs_credits credits_on_stack;
4505 struct cifs_credits *credits = &credits_on_stack;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004507 if (open_file->invalidHandle) {
4508 rc = cifs_reopen_file(open_file, true);
4509 if (rc == -EAGAIN)
4510 continue;
4511 else if (rc)
4512 break;
4513 }
4514
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10004515 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004516 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004517 if (rc)
4518 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519
Jeff Layton690c5e32011-10-19 15:30:16 -04004520 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004521 * Give up immediately if rsize is too small to read an entire
4522 * page. The VFS will fall back to readpage. We should never
4523 * reach this point, however, since we set ra_pages to 0 when the
4524 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04004525 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004526 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004527 add_credits_and_wake_if(server, credits, 0);
Steve French0cb012d2018-10-11 01:01:02 -05004528 free_xid(xid);
			return 0;
		}

		nr_pages = 0;
		err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					  &nr_pages, &offset, &bytes);
		if (!nr_pages) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->server = server;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->tailsz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
		rdata->credits = credits_on_stack;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);

		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		if (rc) {
			add_credits_and_wake_if(server, &rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add(page);
				unlock_page(page);
				put_page(page);
			}
			/* fall back to readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/*
	 * Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	free_xid(xid);
	return rc;
}
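
/*
 * A minimal sketch of the credit discipline the loop above follows
 * (helper names are the real ones used in this file; the surrounding
 * setup is elided and simplified, and max_rsize stands in for the
 * mount's rsize limit):
 *
 *	rc = server->ops->wait_mtu_credits(server, max_rsize, &rsize,
 *					   credits);	// reserve up front
 *	...
 *	rdata->credits = credits_on_stack;	// hand them to the rdata
 *	rc = adjust_credits(server, &rdata->credits, rdata->bytes);
 *	if (rc)		// on any failure, return them and wake waiters
 *		add_credits_and_wake_if(server, &rdata->credits, 0);
 *
 * Credits are either consumed by a successful async_readv() or returned
 * explicitly so that other waiters can make progress.
 */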

/*
 * cifs_readpage_worker must be called with the page pinned
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	/*
	 * We do not want atime to be less than mtime; that broke some apps.
	 * Note that, as written, this pins atime to mtime whenever the
	 * current time and mtime differ.
	 */
	file_inode(file)->i_atime = current_time(file_inode(file));
	if (timespec64_compare(&(file_inode(file)->i_atime),
			       &(file_inode(file)->i_mtime)))
		file_inode(file)->i_atime = file_inode(file)->i_mtime;
	else
		file_inode(file)->i_atime = current_time(file_inode(file));

	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
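
/*
 * Worked example of the short-read handling above: reading a 1000-byte
 * file into a 4096-byte page returns rc == 1000, so bytes 1000..4095 are
 * zeroed by the memset() before SetPageUptodate(), preserving the page
 * cache rule that an uptodate page is fully populated.
 */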

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = page_file_offset(page);
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	free_xid(xid);
	return rc;
}
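
/*
 * Rough sketch (not code from this file) of how the VFS reaches this
 * entry point on a page cache miss in the buffered read path:
 *
 *	page = page_cache_alloc(mapping);
 *	add_to_page_cache_lru(page, mapping, index, gfp);
 *	rc = mapping->a_ops->readpage(file, page);	// -> cifs_readpage()
 *
 * The page arrives locked and is unlocked before return, either by
 * cifs_readpage_worker() itself or by the fscache completion on a
 * cache hit.
 */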

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_inode->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return 0;
}
4692
Linus Torvalds1da177e2005-04-16 15:20:36 -07004693/* We do not want to update the file size from server for inodes
4694 open for write - to avoid races with writepage extending
4695 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004696 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 but this is tricky to do without racing with writebehind
4698 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004699bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700{
Steve Frencha403a0a2007-07-26 15:54:16 +00004701 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004702 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004703
Steve Frencha403a0a2007-07-26 15:54:16 +00004704 if (is_inode_writable(cifsInode)) {
4705 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004706 struct cifs_sb_info *cifs_sb;
4707
Steve Frenchc32a0b62006-01-12 14:41:28 -08004708 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004709 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004710 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004711 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004712 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004713 }
4714
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004715 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004716 return true;
Steve French7ba526312007-02-08 18:14:13 +00004717
Steve French4b18f2a2008-04-29 00:06:05 +00004718 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004719 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004720 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721}
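
/*
 * Example of the policy above: for a file open for write on a cached
 * (non-direct) mount, a server-reported end of file smaller than the
 * local i_size is ignored (false), since writeback may legitimately be
 * extending the file, while a server-reported growth is always accepted
 * (true).
 */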

static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/*
		 * We could try using another file handle if there is one,
		 * but how would we lock it to prevent a close of that handle
		 * racing with this read? In any case this will be written
		 * out by write_end so it is fine.
		 */
	}
out:
	*pagep = page;
	return rc;
}
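
/*
 * For reference, a heavily trimmed sketch of the caller's side of this
 * contract (generic_perform_write() in mm/filemap.c, simplified):
 *
 *	a_ops->write_begin(file, mapping, pos, len, flags, &page, &fsdata);
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, len);
 *	a_ops->write_end(file, mapping, pos, len, copied, page, fsdata);
 *
 * Leaving the page !PageUptodate here is therefore safe: it merely makes
 * cifs_write_end() fall back to a synchronous write of the copied range.
 */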

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
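
/*
 * launder_page is invoked by the VFS (e.g. from invalidate_inode_pages2())
 * when a dirty page must be written back before it can be dropped; the
 * single-page writeback_control above only scopes that one write.
 */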

void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	bool purge_cache = false;
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
				      cfile->oplock_epoch, &purge_cache);

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
					cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (CIFS_CACHE_WRITE(cinode))
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * If an oplock break arrives while the only file handles are
	 * cached ones (no active opens), schedule the deferred close
	 * immediately so that a new open cannot reuse the cached handle.
	 */
	spin_lock(&CIFS_I(inode)->deferred_lock);
	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	spin_unlock(&CIFS_I(inode)->deferred_lock);
	if (is_deferred &&
	    cfile->deferred_close_scheduled &&
	    delayed_work_pending(&cfile->deferred)) {
		if (cancel_delayed_work(&cfile->deferred)) {
			_cifsFileInfo_put(cfile, false, false);
			goto oplock_break_done;
		}
	}
	/*
	 * Releasing a stale oplock after a recent reconnect of the smb
	 * session, using a now incorrect file handle, is not a data
	 * integrity issue, but do not bother sending an oplock release if
	 * the session to the server is still disconnected, since the
	 * server has already released the oplock in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
oplock_break_done:
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	cifs_done_oplock_break(cinode);
}
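
/*
 * Summary of the break handling above, in order: wait for pending writers
 * to drain, downgrade the cached oplock state, flush (and, if required,
 * zap) the page cache, push cached byte-range locks to the server, cancel
 * any pending deferred close so a new open cannot reuse the broken
 * handle, and finally acknowledge the break unless it was cancelled by a
 * reconnect.
 */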

/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually we need to support direct IO for non-forcedirectio
	 * mounts.
	 */
	return -EINVAL;
}
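
/*
 * Example of the effect described above: the VFS rejects O_DIRECT opens
 * with -EINVAL when the address space has no ->direct_IO method, so with
 * this stub in place an open such as
 *
 *	fd = open("/mnt/cifs/file", O_RDWR | O_DIRECT);
 *
 * succeeds on a cached mount, while cache=none mounts route reads and
 * writes through the uncached paths and never call this function.
 */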

static int cifs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	struct cifsFileInfo *cfile = swap_file->private_data;
	struct inode *inode = swap_file->f_mapping->host;
	unsigned long blocks;
	long long isize;

	cifs_dbg(FYI, "swap activate\n");

	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	if (blocks * 512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}
	*span = sis->pages;

	pr_warn_once("Swap support over SMB3 is experimental\n");

	/*
	 * TODO: consider adding an ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
	 */

	/* TODO: add sk_set_memalloc(inet) or similar */

	if (cfile)
		cfile->swapfile = true;
	/*
	 * TODO: Since the file is already open, we can't open it with
	 * DENY_ALL here, but we could add a call to grab a byte-range lock
	 * to prevent others from reading or writing the file.
	 */

	return 0;
}

static void cifs_swap_deactivate(struct file *file)
{
	struct cifsFileInfo *cfile = file->private_data;

	cifs_dbg(FYI, "swap deactivate\n");

	/* TODO: undoing sk_set_memalloc(inet) will eventually be needed */

	if (cfile)
		cfile->swapfile = false;

	/* do we need to unpin (or unlock) the file? */
}
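
/*
 * The hole check in cifs_swap_activate() above treats the file as fully
 * allocated only when i_blocks (512-byte units) covers i_size; e.g. a
 * 1 MiB swapfile must report at least 2048 blocks. Sparse files are
 * rejected because swap I/O cannot safely instantiate missing blocks.
 */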

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
	/*
	 * TODO: investigate and if useful we could add a cifs_migratePage
	 * helper (under a CONFIG_MIGRATION ifdef) in the future, and also
	 * investigate and add an is_dirty_writeback helper if needed
	 */
	.swap_activate = cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
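
/*
 * A simplified sketch of how the two operation vectors above are chosen
 * (the actual selection lives in cifs_set_ops() in inode.c):
 *
 *	if (server->maxBuf < PAGE_SIZE + MAX_CIFS_HDR_SIZE)
 *		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *	else
 *		inode->i_data.a_ops = &cifs_addr_ops;
 */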