// SPDX-License-Identifier: LGPL-2.1
/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"

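/*
 * Map the O_ACCMODE portion of POSIX open flags onto the desired-access
 * bits requested in the SMB open.
 */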
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request and can
		 * cause unnecessary access-denied errors on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

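/*
 * Map POSIX open flags onto the SMB_O_* flags used by the legacy CIFS
 * POSIX extensions open/create call.
 */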
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

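/*
 * Derive the SMB create disposition from the O_CREAT/O_EXCL/O_TRUNC
 * combination; see the mapping table in cifs_nt_open() below.
 */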
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

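/*
 * Open a file via the legacy CIFS POSIX extensions. On success, fill in
 * or refresh the inode from the returned metadata unless the caller
 * passed a NULL pinode.
 */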
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

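/*
 * Open a file the standard (non-POSIX) way: convert the open flags to a
 * desired access and create disposition, open through the server's
 * ops->open, then query inode info for the new handle.
 */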
static int
cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match for disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}

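/* Return true if any open fid on the inode holds mandatory brlocks. */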
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

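/*
 * Take @sem for write by polling down_write_trylock(), sleeping 10 ms
 * between attempts rather than blocking in down_write().
 */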
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

static void cifsFileInfo_put_work(struct work_struct *work);

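/*
 * Allocate and initialize the per-open cifsFileInfo, link it into the
 * tcon and inode open-file lists, and apply the oplock granted by the
 * server to the new handle.
 */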
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

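/* Take an extra reference on an open file's private data. */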
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	not offloaded on close and oplock breaks
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close_getattr)
			server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	if (offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}

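/*
 * ->open() for regular files: reuse a cached handle whose close was
 * deferred when the open flags match; otherwise open via the POSIX
 * extensions when available, falling back to cifs_nt_open().
 */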
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto out;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

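/*
 * Reopen an invalidated file handle (e.g. after reconnect). If
 * @can_flush is set, flush dirty pages and refresh inode info once the
 * open succeeds, then restore the oplock state and relock the file if
 * needed.
 */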
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return 0;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * We cannot grab the rename sem here: various ops, including ones
	 * that already hold it, can cause writepage to be called, and if the
	 * server was down that is how we end up here, so we can never tell
	 * whether the caller already holds the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * especially in the reconnect path it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions)
			rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
		else if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are already writing out data to the server and could
	 * deadlock if we tried to flush it; and since we do not know whether
	 * we have data that would invalidate the current end of file on the
	 * server, we cannot go to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}

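/* Worker that performs the actual close of a deferred-close handle. */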
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}

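/*
 * ->release() for regular files. If the inode holds a read/write/handle
 * caching lease, defer the SMB close by queueing delayed work instead
 * of closing the handle immediately.
 */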
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
		    cinode->lease_granted &&
		    dclose) {
			if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
				inode->i_ctime = inode->i_mtime = current_time(inode);
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there was no pending work,
				 * mod_delayed_work() queued new work, so take
				 * an extra reference to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->acregmax))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->acregmax);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

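/*
 * Walk the tcon's open file list after a reconnect and reopen any
 * invalidated persistent handles.
 */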
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

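/* ->release() for directories: close the search handle and free its buffers. */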
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

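/* Allocate and initialize a byte-range lock record for the current task. */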
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001093/*
1094 * Check if there is another lock that prevents us to set the lock (mandatory
1095 * style). If such a lock exists, update the flock structure with its
1096 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1097 * or leave it the same if we can't. Returns 0 if we don't need to request to
1098 * the server or 1 otherwise.
1099 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001100static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001101cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1102 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001103{
1104 int rc = 0;
1105 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +00001106 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001107 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001108 bool exist;
1109
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001110 down_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001111
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001112 exist = cifs_find_lock_conflict(cfile, offset, length, type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001113 flock->fl_flags, &conf_lock,
1114 CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001115 if (exist) {
1116 flock->fl_start = conf_lock->offset;
1117 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1118 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001119 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001120 flock->fl_type = F_RDLCK;
1121 else
1122 flock->fl_type = F_WRLCK;
1123 } else if (!cinode->can_cache_brlcks)
1124 rc = 1;
1125 else
1126 flock->fl_type = F_UNLCK;
1127
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001128 up_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001129 return rc;
1130}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if no locks prevent us but we need to send a request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
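
/*
 * A note on the wait condition above: a blocked waiter is queued on the
 * conflicting lock's blist and is woken when cifs_del_lock_waiters()
 * list_del_init()s it from that list.  The condition
 *
 *	(lock->blist.prev == &lock->blist) && (lock->blist.next == &lock->blist)
 *
 * is just an open-coded list_empty(&lock->blist), i.e. "we have been
 * unlinked", which is why the waiter can re-check for conflicts
 * (goto try_again) without holding lock_sem while it sleeps.
 */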

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to send a request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) <0, if an error occurred while setting the lock;
 * 2) 0, if we set the lock and don't need to send a request to the server;
 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
 * 4) FILE_LOCK_DEFERRED + 1, if we need to send a request to the server.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}
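
/*
 * A minimal caller-side sketch of the return contract above (this is
 * exactly what cifs_setlk() does further down in this file):
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;	// error, handled locally, or deferred
 *	// rc == FILE_LOCK_DEFERRED + 1: fall through, ask the server
 */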

int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
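
/*
 * The batching above is bounded by the server's negotiated buffer size.
 * Schematically (mirroring the computation in the function):
 *
 *	payload = min(max_buf - sizeof(struct smb_hdr), PAGE_SIZE);
 *	max_num = (payload - sizeof(struct smb_hdr)) /
 *					sizeof(LOCKING_ANDX_RANGE);
 *
 * so cached locks are pushed in chunks of at most max_num ranges per
 * cifs_lockv() call, in two passes: one for exclusive and one for shared
 * (LOCKING_ANDX_SHARED_LOCK) types.
 */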

static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->fl_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
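
/*
 * The two-pass shape of cifs_push_posix_locks() is deliberate: flc_lock
 * is a spinlock, so no sleeping GFP_KERNEL allocation may happen while it
 * is held.  The first pass only counts the posix locks, the lock_to_push
 * structures are allocated outside the spinlock, and the second pass
 * fills them in.  The caller (cifs_push_locks() below) holds lock_sem,
 * which guarantees the list cannot grow between the two passes.
 */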

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
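
/*
 * Summary of the fl_type mapping above (server->vals supplies the
 * protocol-specific bit values):
 *
 *	F_WRLCK / F_EXLCK  ->  exclusive_lock_type,  *lock = 1
 *	F_RDLCK / F_SHLCK  ->  shared_lock_type,     *lock = 1
 *	F_UNLCK            ->  unlock_lock_type,     *unlock = 1
 *
 * all OR-ed on top of large_lock_type (large-file, i.e. 64-bit, ranges).
 */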

static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type | server->vals->shared_lock_type,
					    0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
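
/*
 * Note that the mandatory-style path of cifs_getlk() has no native "test
 * lock" operation to call, so it probes: it tries to set the lock with a
 * zero timeout and, on success, immediately unlocks again and reports
 * F_UNLCK; if an exclusive probe fails, the range is retried as a shared
 * lock to distinguish a shared-lock conflict (reported as F_RDLCK) from
 * an exclusive one (reported as F_WRLCK).
 */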

void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;

	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;

	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}

static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->fl_flags);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * page reading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
		    CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}

int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	if (!(fl->fl_flags & FL_FLOCK)) {
		free_xid(xid);
		return -ENOLCK;
	}

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
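
/*
 * For orientation, a minimal (hypothetical) user-space sequence that
 * reaches cifs_lock() through the VFS on a cifs mount:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 4096,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * F_SETLKW arrives with FL_SLEEP set (wait_flag = true), F_SETLK without
 * it, and F_GETLK takes the IS_GETLK()/cifs_getlk() branch above.
 */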

/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms = {0};

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/*
				 * We could deadlock if we called
				 * filemap_fdatawait from here, so tell
				 * reopen_file not to flush data to the
				 * server now.
				 */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written,
						     iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size) {
			i_size_write(d_inode(dentry), *offset);
			d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
		}
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
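
/*
 * A note on the size update above: i_blocks is kept in 512-byte units, so
 * the expression (512 - 1 + *offset) >> 9 is a round-up division of the
 * new file size by 512; e.g. a 1-byte file accounts for one block.
 */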

struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/*
	 * We could simply get the first_list_entry since write-only entries
	 * are always at the end of the list, but since the first entry might
	 * have a close pending, we go through the whole list.
	 */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}

/* Return -EBADF if no handle is found and general rc otherwise */
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;

	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen, but we had reports of an oops
	 * (due to it being zero) during stress testcases, so we need to
	 * check for it.
	 */
	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
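
/*
 * The refind loop in cifs_get_writable_file() is a bounded retry: if only
 * invalidated handles were found, one of them is reopened outside
 * open_file_lock and, on success, returned; on failure it is moved to the
 * tail of the open file list so the next pass picks a different handle,
 * up to MAX_REOPEN_ATT attempts.
 */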

struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
{
	struct cifsFileInfo *cfile;
	int rc;

	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
	if (rc)
		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);

	return cfile;
}

int
cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
		       int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		const char *full_path = build_path_from_dentry(cfile->dentry, page);

		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		return cifs_get_writable_file(cinode, flags, ret_file);
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}

int
cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		const char *full_path = build_path_from_dentry(cfile->dentry, page);

		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		*ret_file = find_readable_file(cinode, 0);
		return *ret_file ? 0 : -ENOENT;
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
2195
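/*
 * Write the dirty span [@from, @to) of a locked page back to the server
 * using any writable handle that can be found for the inode. The write is
 * clamped so it never extends the file, and a write racing with truncate
 * is silently dropped.
 */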
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
				    &open_file);
	if (!rc) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
		else
			rc = -EFAULT;
	} else {
		cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
		if (!is_retryable_error(rc))
			rc = -EIO;
	}

	kunmap(page);
	return rc;
}

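/*
 * Allocate a writedata descriptor large enough for @tofind pages and fill
 * its page array with up to that many dirty pages from the mapping,
 * starting the tagged search at *@index.
 */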
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	*found_pages = find_get_pages_range_tag(mapping, index, end,
				PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
	return wdata;
}

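/*
 * Lock and keep the contiguous run of dirty pages collected in @wdata that
 * is still safe to write back: stop at the first page that was truncated,
 * redirtied, already under writeback, or not consecutive with the previous
 * one. Pages past the run are released. Returns the number of pages kept.
 */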
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */

		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}

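/*
 * Fill in the remaining writedata fields (offset, sizes, pid) for the run
 * of @nr_pages pages and hand the request to the server's async_writev op,
 * after trimming the reserved credits down to the actual request size.
 */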
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
	wdata->pid = wdata->cfile->pid;

	rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
	if (rc)
		return rc;

	if (wdata->cfile->invalidHandle)
		rc = -EAGAIN;
	else
		rc = wdata->server->ops->async_writev(wdata,
						      cifs_writedata_release);

	return rc;
}

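/*
 * address_space_operations::writepages for cifs. Batches dirty pages into
 * wsize-sized async writes, throttled by the server's credit mechanism.
 * Falls back to generic_writepages() (one page at a time) when the
 * negotiated wsize is smaller than a page.
 */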
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct cifsFileInfo *cfile = NULL;
	int rc = 0;
	int saved_rc = 0;
	unsigned int xid;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->ctx->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	xid = get_xid();
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);

retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize;
		pgoff_t next = 0, tofind, saved_index = index;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;
		int get_file_rc = 0;

		if (cfile)
			cifsFileInfo_put(cfile);

		rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);

		/* in case of an error store it to return later */
		if (rc)
			get_file_rc = rc;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
						   &wsize, credits);
		if (rc != 0) {
			done = true;
			break;
		}

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			done = true;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits_on_stack;
		wdata->cfile = cfile;
		wdata->server = server;
		cfile = NULL;

		if (!wdata->cfile) {
			cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
				 get_file_rc);
			if (is_retryable_error(get_file_rc))
				rc = get_file_rc;
			else
				rc = -EBADF;
		} else
			rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (is_retryable_error(rc))
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (!is_retryable_error(rc))
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		/* Return immediately if we received a signal during writing */
		if (is_interrupt_error(rc)) {
			done = true;
			break;
		}

		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (saved_rc != 0)
		rc = saved_rc;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	if (cfile)
		cifsFileInfo_put(cfile);
	free_xid(xid);
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
	return rc;
}

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size) {
			i_size_write(inode, pos);
			inode->i_blocks = (512 - 1 + pos) >> 9;
		}
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);
	/* Indication to update ctime and mtime as close is deferred */
	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);

	return rc;
}

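/*
 * Strict-cache fsync: write back and wait on dirty pages, zap the page
 * cache if we no longer hold read caching rights, then send a flush to
 * the server unless the CIFS_MOUNT_NOSSYNC mount flag suppresses it.
 */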
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(inode->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);

	rc = file_write_and_wait_range(file, start, end);
	if (rc) {
		trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
		return rc;
	}

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
	if (rc)
		trace_cifs_flush_err(inode->i_ino, rc);
	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}

static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}

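/*
 * Resend a previously failed wdata as a single request: reopen the handle
 * if it went stale, wait (polling once a second) until enough credits are
 * available to cover the whole wdata, then reissue the async write,
 * retrying on -EAGAIN. On success the wdata is moved onto @wdata_list.
 */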
static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
	struct cifs_aio_ctx *ctx)
{
	unsigned int wsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server = wdata->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/*
		 * Wait for credits to resend this wdata.
		 * Note: we are attempting to resend the whole wdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, wdata->bytes,
						&wsize, &credits);
			if (rc)
				goto fail;

			if (wsize < wdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (wsize < wdata->bytes);
		wdata->credits = credits;

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else {
#ifdef CONFIG_CIFS_SMB_DIRECT
				if (wdata->mr) {
					wdata->mr->need_invalidate = true;
					smbd_deregister_mr(wdata->mr);
					wdata->mr = NULL;
				}
#endif
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
			}
		}

		/* If the write was successfully sent, we are done */
		if (!rc) {
			list_add_tail(&wdata->list, wdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &wdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	return rc;
}

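/*
 * Core of the uncached/direct write path: carve @len bytes from @from into
 * wsize-limited chunks, wrap each chunk in a wdata (pinning the user pages
 * for direct I/O, or copying into freshly allocated pages otherwise), and
 * issue the chunks as credit-throttled async writes, queued on @wdata_list
 * for later collection.
 */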
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	unsigned int xid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	xid = get_xid();

	do {
		unsigned int wsize;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;

		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
						   &wsize, credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, wsize);

		if (ctx->direct_io) {
			ssize_t result;

			result = iov_iter_get_pages_alloc(
				from, &pagevec, cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					 "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
					 result, iov_iter_type(from),
					 from->iov_offset, from->count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(from, cur_len);

			nr_pages =
				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;

			wdata = cifs_writedata_direct_alloc(pagevec,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			wdata->page_offset = start;
			wdata->tailsz =
				nr_pages > 1 ?
					cur_len - (PAGE_SIZE - start) -
					(nr_pages - 2) * PAGE_SIZE :
					cur_len;
		} else {
			nr_pages = get_numpages(wsize, len, &cur_len);
			wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
			if (rc) {
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			num_pages = nr_pages;
			rc = wdata_fill_from_iovec(
				wdata, from, &cur_len, &num_pages);
			if (rc) {
				for (i = 0; i < nr_pages; i++)
					put_page(wdata->pages[i]);
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			/*
			 * Bring nr_pages down to the number of pages we
			 * actually used, and free any pages that we didn't use.
			 */
			for ( ; nr_pages > num_pages; nr_pages--)
				put_page(wdata->pages[nr_pages - 1]);

			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->server = server;
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->credits = credits_on_stack;
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		if (rc) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	free_xid(xid);
	return rc;
}

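/*
 * Completion side of the uncached write path: under ctx->aio_mutex, reap
 * finished wdatas in increasing-offset order, accumulate the byte count,
 * resend retryable failures, then report the final result through the
 * iocb (for aio) or ctx->done (for synchronous callers).
 */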
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				if (ctx->direct_io)
					rc = cifs_resend_wdata(
						wdata, &tmp_list, ctx);
				else {
					iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

					rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);

					kref_put(&wdata->refcount,
						cifs_uncached_writedata_release);
				}

				list_splice(&tmp_list, &ctx->list);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	cifs_stats_bytes_written(tcon, ctx->total_len);
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}

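/*
 * Common entry point for cifs_user_writev() and cifs_direct_writev().
 * Sets up a cifs_aio_ctx describing the request, kicks off the chunked
 * async writes, and either returns -EIOCBQUEUED (aio) or waits for all
 * chunks to complete and returns the number of bytes written.
 */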
static ssize_t __cifs_writev(
	struct kiocb *iocb, struct iov_iter *from, bool direct)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	size_t len = iov_iter_count(from);
	int rc;

	/*
	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
	 * In this case, fall back to the non-direct write function.
	 * This could be improved by getting pages directly in ITER_KVEC.
	 */
	if (direct && iov_iter_is_kvec(from)) {
		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
		direct = false;
	}

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	if (direct) {
		ctx->direct_io = true;
		ctx->iter = *from;
		ctx->len = len;
	} else {
		rc = setup_aio_ctx_iter(ctx, from, WRITE);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
	}

	/* grab a lock here because response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}

ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, true);
}

ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}

static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}

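/*
 * Strict-cache write entry point: with write caching rights we can go
 * through the page cache (honoring byte-range locks via cifs_writev());
 * without them the data is sent straight to the server, and any
 * read-cached pages are zapped so later reads fetch fresh data.
 */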
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause an error with mandatory locks on
	 * these pages but not on the region from pos to pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}

static struct cifs_readdata *
cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (rdata != NULL) {
		rdata->pages = pages;
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct page **pages =
		kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	struct cifs_readdata *ret = NULL;

	if (pages) {
		ret = cifs_readdata_direct_alloc(pages, complete);
		if (!ret)
			kfree(pages);
	}

	return ret;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kvfree(rdata->pages);
	kfree(rdata);
}

Jeff Layton2a1bb132012-05-16 07:13:17 -04003438static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003439cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04003440{
3441 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003442 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04003443 unsigned int i;
3444
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003445 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04003446 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3447 if (!page) {
3448 rc = -ENOMEM;
3449 break;
3450 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003451 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04003452 }
3453
3454 if (rc) {
Roberto Bergantinos Corpas31fad7d2019-05-28 09:38:14 +02003455 unsigned int nr_page_failed = i;
3456
3457 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003458 put_page(rdata->pages[i]);
3459 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04003460 }
3461 }
3462 return rc;
3463}
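
/*
 * Note the rollback pattern above: if the i-th page allocation fails,
 * the first i pages are put back and their slots cleared so a later
 * release of the rdata cannot double-free them.
 */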
3464
3465static void
3466cifs_uncached_readdata_release(struct kref *refcount)
3467{
Jeff Layton1c892542012-05-16 07:13:17 -04003468 struct cifs_readdata *rdata = container_of(refcount,
3469 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003470 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003471
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003472 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003473 for (i = 0; i < rdata->nr_pages; i++) {
3474 put_page(rdata->pages[i]);
Jeff Layton1c892542012-05-16 07:13:17 -04003475 }
3476 cifs_readdata_release(refcount);
3477}
3478
Jeff Layton1c892542012-05-16 07:13:17 -04003479/**
3480 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3481 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05003482 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04003483 *
3484 * This function copies data from a list of pages in a readdata response into
3485 * an array of iovecs. It will first calculate where the data should go
3486 * based on the info in the readdata and then copy the data into that spot.
3487 */
Al Viro7f25bba2014-02-04 14:07:43 -05003488static int
3489cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04003490{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04003491 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003492 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003493
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003494 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003495 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02003496 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003497 size_t written;
3498
David Howells00e23702018-10-22 13:07:28 +01003499 if (unlikely(iov_iter_is_pipe(iter))) {
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003500 void *addr = kmap_atomic(page);
3501
3502 written = copy_to_iter(addr, copy, iter);
3503 kunmap_atomic(addr);
3504 } else
3505 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05003506 remaining -= written;
3507 if (written < copy && iov_iter_count(iter) > 0)
3508 break;
Jeff Layton1c892542012-05-16 07:13:17 -04003509 }
Al Viro7f25bba2014-02-04 14:07:43 -05003510 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04003511}
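
/*
 * Worked example for cifs_readdata_to_iov() (hypothetical numbers):
 * with got_bytes = 5000 and PAGE_SIZE = 4096, the loop copies 4096
 * bytes from page 0 and the remaining 904 bytes from page 1; a short
 * copy with space still left in the iterator makes it return -EFAULT.
 */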
3512
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003513static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3514
Jeff Layton1c892542012-05-16 07:13:17 -04003515static void
3516cifs_uncached_readv_complete(struct work_struct *work)
3517{
3518 struct cifs_readdata *rdata = container_of(work,
3519 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04003520
3521 complete(&rdata->done);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003522 collect_uncached_read_data(rdata->ctx);
3523 /* the below call can possibly free the last ref to aio ctx */
Jeff Layton1c892542012-05-16 07:13:17 -04003524 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3525}
3526
3527static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003528uncached_fill_pages(struct TCP_Server_Info *server,
3529 struct cifs_readdata *rdata, struct iov_iter *iter,
3530 unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04003531{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003532 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003533 unsigned int i;
3534 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003535 unsigned int page_offset = rdata->page_offset;
Jeff Layton1c892542012-05-16 07:13:17 -04003536
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003537 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07003538 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003539 for (i = 0; i < nr_pages; i++) {
3540 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05003541 size_t n;
Long Li1dbe3462018-05-30 12:47:55 -07003542 unsigned int segment_size = rdata->pagesz;
3543
3544 if (i == 0)
3545 segment_size -= page_offset;
3546 else
3547 page_offset = 0;
3548
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003549
Al Viro71335662016-01-09 19:54:50 -05003550 if (len == 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04003551 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003552 rdata->pages[i] = NULL;
3553 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04003554 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07003555 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003556 }
Long Li1dbe3462018-05-30 12:47:55 -07003557
Al Viro71335662016-01-09 19:54:50 -05003558 n = len;
Long Li1dbe3462018-05-30 12:47:55 -07003559 if (len >= segment_size)
Al Viro71335662016-01-09 19:54:50 -05003560 /* enough data to fill the page */
Long Li1dbe3462018-05-30 12:47:55 -07003561 n = segment_size;
3562 else
Al Viro71335662016-01-09 19:54:50 -05003563 rdata->tailsz = len;
Long Li1dbe3462018-05-30 12:47:55 -07003564 len -= n;
3565
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003566 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003567 result = copy_page_from_iter(
3568 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003569#ifdef CONFIG_CIFS_SMB_DIRECT
3570 else if (rdata->mr)
3571 result = n;
3572#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003573 else
Long Li1dbe3462018-05-30 12:47:55 -07003574 result = cifs_read_page_from_socket(
3575 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003576 if (result < 0)
3577 break;
3578
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003579 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003580 }
3581
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003582 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3583 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003584}
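
/*
 * Worked example for uncached_fill_pages() (hypothetical numbers):
 * with pagesz = 4096, page_offset = 1024 and len = 6000, page 0
 * receives 3072 bytes (4096 - 1024), page 1 receives the remaining
 * 2928 bytes and sets tailsz = 2928, and any further pages are
 * released since len has reached 0.
 */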
3585
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003586static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003587cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3588 struct cifs_readdata *rdata, unsigned int len)
3589{
3590 return uncached_fill_pages(server, rdata, NULL, len);
3591}
3592
3593static int
3594cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3595 struct cifs_readdata *rdata,
3596 struct iov_iter *iter)
3597{
3598 return uncached_fill_pages(server, rdata, iter, iter->count);
3599}
3600
Long Li6e6e2b82018-10-31 22:13:09 +00003601static int cifs_resend_rdata(struct cifs_readdata *rdata,
3602 struct list_head *rdata_list,
3603 struct cifs_aio_ctx *ctx)
3604{
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003605 unsigned int rsize;
3606 struct cifs_credits credits;
Long Li6e6e2b82018-10-31 22:13:09 +00003607 int rc;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003608 struct TCP_Server_Info *server;
3609
3610 /* XXX: should we pick a new channel here? */
3611 server = rdata->server;
Long Li6e6e2b82018-10-31 22:13:09 +00003612
Long Li6e6e2b82018-10-31 22:13:09 +00003613 do {
Long Li0b0dfd52019-03-15 07:55:00 +00003614 if (rdata->cfile->invalidHandle) {
3615 rc = cifs_reopen_file(rdata->cfile, true);
3616 if (rc == -EAGAIN)
3617 continue;
3618 else if (rc)
3619 break;
3620 }
3621
3622 /*
3623 * Wait for credits to resend this rdata.
 3624 * Note: we attempt to resend the whole rdata rather than
 3625 * in segments
3626 */
3627 do {
3628 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
Long Li6e6e2b82018-10-31 22:13:09 +00003629 &rsize, &credits);
3630
Long Li0b0dfd52019-03-15 07:55:00 +00003631 if (rc)
3632 goto fail;
Long Li6e6e2b82018-10-31 22:13:09 +00003633
Long Li0b0dfd52019-03-15 07:55:00 +00003634 if (rsize < rdata->bytes) {
3635 add_credits_and_wake_if(server, &credits, 0);
3636 msleep(1000);
3637 }
3638 } while (rsize < rdata->bytes);
3639 rdata->credits = credits;
3640
3641 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3642 if (!rc) {
3643 if (rdata->cfile->invalidHandle)
3644 rc = -EAGAIN;
Long Lib7a55bb2019-10-15 22:54:50 +00003645 else {
3646#ifdef CONFIG_CIFS_SMB_DIRECT
3647 if (rdata->mr) {
3648 rdata->mr->need_invalidate = true;
3649 smbd_deregister_mr(rdata->mr);
3650 rdata->mr = NULL;
3651 }
3652#endif
Long Li0b0dfd52019-03-15 07:55:00 +00003653 rc = server->ops->async_readv(rdata);
Long Lib7a55bb2019-10-15 22:54:50 +00003654 }
Long Li6e6e2b82018-10-31 22:13:09 +00003655 }
Long Li6e6e2b82018-10-31 22:13:09 +00003656
Long Li0b0dfd52019-03-15 07:55:00 +00003657 /* If the read was successfully sent, we are done */
3658 if (!rc) {
3659 /* Add to aio pending list */
3660 list_add_tail(&rdata->list, rdata_list);
3661 return 0;
3662 }
Long Li6e6e2b82018-10-31 22:13:09 +00003663
Long Li0b0dfd52019-03-15 07:55:00 +00003664 /* Roll back credits and retry if needed */
3665 add_credits_and_wake_if(server, &rdata->credits, 0);
3666 } while (rc == -EAGAIN);
Long Li6e6e2b82018-10-31 22:13:09 +00003667
Long Li0b0dfd52019-03-15 07:55:00 +00003668fail:
3669 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Long Li6e6e2b82018-10-31 22:13:09 +00003670 return rc;
3671}
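
/*
 * Note the backoff above: a resend needs credits for the whole rdata,
 * so when the server grants fewer than rdata->bytes the partial grant
 * is returned and we sleep a second before asking again.
 */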
3672
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003673static int
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003674cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003675 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3676 struct cifs_aio_ctx *ctx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003678 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003679 unsigned int npages, rsize;
3680 struct cifs_credits credits_on_stack;
3681 struct cifs_credits *credits = &credits_on_stack;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003682 size_t cur_len;
3683 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003684 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003685 struct TCP_Server_Info *server;
Long Li6e6e2b82018-10-31 22:13:09 +00003686 struct page **pagevec;
3687 size_t start;
3688 struct iov_iter direct_iov = ctx->iter;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003689
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003690 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07003691
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003692 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3693 pid = open_file->pid;
3694 else
3695 pid = current->tgid;
3696
Long Li6e6e2b82018-10-31 22:13:09 +00003697 if (ctx->direct_io)
3698 iov_iter_advance(&direct_iov, offset - ctx->pos);
3699
Jeff Layton1c892542012-05-16 07:13:17 -04003700 do {
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003701 if (open_file->invalidHandle) {
3702 rc = cifs_reopen_file(open_file, true);
3703 if (rc == -EAGAIN)
3704 continue;
3705 else if (rc)
3706 break;
3707 }
3708
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10003709 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003710 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003711 if (rc)
3712 break;
3713
3714 cur_len = min_t(const size_t, len, rsize);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003715
Long Li6e6e2b82018-10-31 22:13:09 +00003716 if (ctx->direct_io) {
Steve Frenchb98e26d2018-11-01 10:54:32 -05003717 ssize_t result;
Long Li6e6e2b82018-10-31 22:13:09 +00003718
Steve Frenchb98e26d2018-11-01 10:54:32 -05003719 result = iov_iter_get_pages_alloc(
Long Li6e6e2b82018-10-31 22:13:09 +00003720 &direct_iov, &pagevec,
3721 cur_len, &start);
Steve Frenchb98e26d2018-11-01 10:54:32 -05003722 if (result < 0) {
Long Li6e6e2b82018-10-31 22:13:09 +00003723 cifs_dbg(VFS,
Joe Perchesa0a30362020-04-14 22:42:53 -07003724 "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
3725 result, iov_iter_type(&direct_iov),
3726 direct_iov.iov_offset,
3727 direct_iov.count);
Long Li6e6e2b82018-10-31 22:13:09 +00003728 dump_stack();
Long Li54e94ff2018-12-16 22:41:07 +00003729
3730 rc = result;
3731 add_credits_and_wake_if(server, credits, 0);
Long Li6e6e2b82018-10-31 22:13:09 +00003732 break;
3733 }
Steve Frenchb98e26d2018-11-01 10:54:32 -05003734 cur_len = (size_t)result;
Long Li6e6e2b82018-10-31 22:13:09 +00003735 iov_iter_advance(&direct_iov, cur_len);
3736
3737 rdata = cifs_readdata_direct_alloc(
3738 pagevec, cifs_uncached_readv_complete);
3739 if (!rdata) {
3740 add_credits_and_wake_if(server, credits, 0);
3741 rc = -ENOMEM;
3742 break;
3743 }
3744
3745 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
3746 rdata->page_offset = start;
3747 rdata->tailsz = npages > 1 ?
3748 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
3749 cur_len;
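			/*
			 * Worked example (hypothetical numbers): cur_len =
			 * 10000 with start = 512 spans npages = 3 pages, so
			 * the tail page holds
			 * 10000 - (4096 - 512) - 1*4096 = 2320 bytes.
			 */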
3750
3751 } else {
3752
3753 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3754 /* allocate a readdata struct */
3755 rdata = cifs_readdata_alloc(npages,
Jeff Layton1c892542012-05-16 07:13:17 -04003756 cifs_uncached_readv_complete);
Long Li6e6e2b82018-10-31 22:13:09 +00003757 if (!rdata) {
3758 add_credits_and_wake_if(server, credits, 0);
3759 rc = -ENOMEM;
3760 break;
3761 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003762
Long Li6e6e2b82018-10-31 22:13:09 +00003763 rc = cifs_read_allocate_pages(rdata, npages);
Pavel Shilovsky9bda8722019-01-23 17:12:09 -08003764 if (rc) {
3765 kvfree(rdata->pages);
3766 kfree(rdata);
3767 add_credits_and_wake_if(server, credits, 0);
3768 break;
3769 }
Long Li6e6e2b82018-10-31 22:13:09 +00003770
3771 rdata->tailsz = PAGE_SIZE;
3772 }
Jeff Layton1c892542012-05-16 07:13:17 -04003773
Aurelien Aptel352d96f2020-05-31 12:38:22 -05003774 rdata->server = server;
Jeff Layton1c892542012-05-16 07:13:17 -04003775 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003776 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003777 rdata->offset = offset;
3778 rdata->bytes = cur_len;
3779 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003780 rdata->pagesz = PAGE_SIZE;
3781 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003782 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003783 rdata->credits = credits_on_stack;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003784 rdata->ctx = ctx;
3785 kref_get(&ctx->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04003786
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003787 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3788
3789 if (!rc) {
3790 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08003791 rc = -EAGAIN;
3792 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08003793 rc = server->ops->async_readv(rdata);
3794 }
3795
Jeff Layton1c892542012-05-16 07:13:17 -04003796 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08003797 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003798 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003799 cifs_uncached_readdata_release);
3800 if (rc == -EAGAIN) {
3801 iov_iter_revert(&direct_iov, cur_len);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003802 continue;
Long Li6e6e2b82018-10-31 22:13:09 +00003803 }
Jeff Layton1c892542012-05-16 07:13:17 -04003804 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805 }
Jeff Layton1c892542012-05-16 07:13:17 -04003806
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003807 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003808 offset += cur_len;
3809 len -= cur_len;
3810 } while (len > 0);
3811
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003812 return rc;
3813}
3814
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003815static void
3816collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003817{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003818 struct cifs_readdata *rdata, *tmp;
3819 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003820 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003821 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003822
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003823 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003824
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003825 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003826
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003827 if (list_empty(&ctx->list)) {
3828 mutex_unlock(&ctx->aio_mutex);
3829 return;
3830 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003831
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003832 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003833 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003834again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003835 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003836 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003837 if (!try_wait_for_completion(&rdata->done)) {
3838 mutex_unlock(&ctx->aio_mutex);
3839 return;
3840 }
3841
3842 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003843 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003844 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003845 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003846
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003847 list_del_init(&rdata->list);
3848 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003849
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003850 /*
 3851 * Got part of the data and then a reconnect
 3852 * happened -- fill the buffer and continue
 3853 * reading.
3854 */
3855 if (got_bytes && got_bytes < rdata->bytes) {
Long Li6e6e2b82018-10-31 22:13:09 +00003856 rc = 0;
3857 if (!ctx->direct_io)
3858 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003859 if (rc) {
3860 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003861 cifs_uncached_readdata_release);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003862 continue;
3863 }
3864 }
3865
Long Li6e6e2b82018-10-31 22:13:09 +00003866 if (ctx->direct_io) {
3867 /*
3868 * Re-use rdata as this is a
3869 * direct I/O
3870 */
3871 rc = cifs_resend_rdata(
3872 rdata,
3873 &tmp_list, ctx);
3874 } else {
3875 rc = cifs_send_async_read(
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003876 rdata->offset + got_bytes,
3877 rdata->bytes - got_bytes,
3878 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003879 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003880
Long Li6e6e2b82018-10-31 22:13:09 +00003881 kref_put(&rdata->refcount,
3882 cifs_uncached_readdata_release);
3883 }
3884
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003885 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003886
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003887 goto again;
3888 } else if (rdata->result)
3889 rc = rdata->result;
Long Li6e6e2b82018-10-31 22:13:09 +00003890 else if (!ctx->direct_io)
Jeff Layton1c892542012-05-16 07:13:17 -04003891 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003892
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003893 /* if there was a short read -- discard anything left */
3894 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3895 rc = -ENODATA;
Long Li6e6e2b82018-10-31 22:13:09 +00003896
3897 ctx->total_len += rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003898 }
3899 list_del_init(&rdata->list);
3900 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003902
Jérôme Glisse13f59382019-04-10 15:37:47 -04003903 if (!ctx->direct_io)
Long Li6e6e2b82018-10-31 22:13:09 +00003904 ctx->total_len = ctx->len - iov_iter_count(to);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003905
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003906 /* mask nodata case */
3907 if (rc == -ENODATA)
3908 rc = 0;
3909
Yilu Lin97adda82020-03-18 11:59:19 +08003910 ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003911
3912 mutex_unlock(&ctx->aio_mutex);
3913
3914 if (ctx->iocb && ctx->iocb->ki_complete)
3915 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3916 else
3917 complete(&ctx->done);
3918}
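
/*
 * Completion sketch: collect_uncached_read_data() runs from each
 * rdata's work item; the invocation that finds every rdata completed
 * copies the data into the iterator (buffered path), resends any
 * -EAGAIN pieces, and then either calls ->ki_complete() for async
 * callers or wakes the synchronous waiter in __cifs_readv().
 */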
3919
Long Li6e6e2b82018-10-31 22:13:09 +00003920static ssize_t __cifs_readv(
3921 struct kiocb *iocb, struct iov_iter *to, bool direct)
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003922{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003923 size_t len;
Long Li6e6e2b82018-10-31 22:13:09 +00003924 struct file *file = iocb->ki_filp;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003925 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003926 struct cifsFileInfo *cfile;
Long Li6e6e2b82018-10-31 22:13:09 +00003927 struct cifs_tcon *tcon;
3928 ssize_t rc, total_read = 0;
3929 loff_t offset = iocb->ki_pos;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003930 struct cifs_aio_ctx *ctx;
3931
Long Li6e6e2b82018-10-31 22:13:09 +00003932 /*
 3933 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
 3934 * so fall back to the data copy read path. This could be
 3935 * improved by getting pages directly in ITER_KVEC.
3936 */
David Howells66294002019-11-21 08:13:58 +00003937 if (direct && iov_iter_is_kvec(to)) {
Long Li6e6e2b82018-10-31 22:13:09 +00003938 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3939 direct = false;
3940 }
3941
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003942 len = iov_iter_count(to);
3943 if (!len)
3944 return 0;
3945
3946 cifs_sb = CIFS_FILE_SB(file);
3947 cfile = file->private_data;
3948 tcon = tlink_tcon(cfile->tlink);
3949
3950 if (!tcon->ses->server->ops->async_readv)
3951 return -ENOSYS;
3952
3953 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3954 cifs_dbg(FYI, "attempting read on write only file instance\n");
3955
3956 ctx = cifs_aio_ctx_alloc();
3957 if (!ctx)
3958 return -ENOMEM;
3959
3960 ctx->cfile = cifsFileInfo_get(cfile);
3961
3962 if (!is_sync_kiocb(iocb))
3963 ctx->iocb = iocb;
3964
David Howells00e23702018-10-22 13:07:28 +01003965 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003966 ctx->should_dirty = true;
3967
Long Li6e6e2b82018-10-31 22:13:09 +00003968 if (direct) {
3969 ctx->pos = offset;
3970 ctx->direct_io = true;
3971 ctx->iter = *to;
3972 ctx->len = len;
3973 } else {
3974 rc = setup_aio_ctx_iter(ctx, to, READ);
3975 if (rc) {
3976 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3977 return rc;
3978 }
3979 len = ctx->len;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003980 }
3981
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003982 /* grab a lock here due to read response handlers can access ctx */
3983 mutex_lock(&ctx->aio_mutex);
3984
3985 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3986
3987 /* if at least one read request send succeeded, then reset rc */
3988 if (!list_empty(&ctx->list))
3989 rc = 0;
3990
3991 mutex_unlock(&ctx->aio_mutex);
3992
3993 if (rc) {
3994 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3995 return rc;
3996 }
3997
3998 if (!is_sync_kiocb(iocb)) {
3999 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4000 return -EIOCBQUEUED;
4001 }
4002
4003 rc = wait_for_completion_killable(&ctx->done);
4004 if (rc) {
4005 mutex_lock(&ctx->aio_mutex);
4006 ctx->rc = rc = -EINTR;
4007 total_read = ctx->total_len;
4008 mutex_unlock(&ctx->aio_mutex);
4009 } else {
4010 rc = ctx->rc;
4011 total_read = ctx->total_len;
4012 }
4013
4014 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4015
Al Viro0165e812014-02-04 14:19:48 -05004016 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04004017 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05004018 return total_read;
4019 }
4020 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004021}
4022
Long Li6e6e2b82018-10-31 22:13:09 +00004023ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
4024{
4025 return __cifs_readv(iocb, to, true);
4026}
4027
4028ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
4029{
4030 return __cifs_readv(iocb, to, false);
4031}
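
/*
 * Both entry points above funnel into __cifs_readv(): the direct
 * variant pins the user pages and reads into them in place, while the
 * uncached variant copies through kernel pages; ITER_KVEC iterators
 * are always routed through the copy path.
 */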
4032
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004033ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04004034cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004035{
Al Viro496ad9a2013-01-23 17:07:38 -05004036 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004037 struct cifsInodeInfo *cinode = CIFS_I(inode);
4038 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4039 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
4040 iocb->ki_filp->private_data;
4041 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
4042 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004043
4044 /*
4045 * In strict cache mode we need to read from the server all the time
 4046 * if we don't have a level II oplock because the server can delay the
 4047 * mtime change - so we can't make a decision about invalidating the inode.
 4048 * We can also fail with page reading if there are mandatory locks
4049 * on pages affected by this read but not on the region from pos to
4050 * pos+len-1.
4051 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004052 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04004053 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004054
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004055 if (cap_unix(tcon->ses) &&
4056 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
4057 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04004058 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004059
4060 /*
4061 * We need to hold the sem to be sure nobody modifies lock list
4062 * with a brlock that prevents reading.
4063 */
4064 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04004065 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004066 tcon->ses->server->vals->shared_lock_type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10004067 0, NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04004068 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07004069 up_read(&cinode->lock_sem);
4070 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03004071}
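
/*
 * Decision sketch for strict reads: no read oplock means an uncached
 * read; with POSIX extensions (server-side fcntl locks) the page
 * cache can be used directly; otherwise the cached read proceeds only
 * if no mandatory brlock conflicts with the requested range.
 */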
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004073static ssize_t
4074cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075{
4076 int rc = -EACCES;
4077 unsigned int bytes_read = 0;
4078 unsigned int total_read;
4079 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004080 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004082 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004083 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004084 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004085 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086 struct cifsFileInfo *open_file;
Aurelien Aptel7c065142020-06-04 17:23:55 +02004087 struct cifs_io_parms io_parms = {0};
Steve Frenchec637e32005-12-12 20:53:18 -08004088 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004089 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004091 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04004092 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004094 /* FIXME: set up handlers for larger reads and/or convert to async */
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10004095 rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
Jeff Layton5eba8ab2011-10-19 15:30:26 -04004096
Linus Torvalds1da177e2005-04-16 15:20:36 -07004097 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304098 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004099 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304100 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07004102 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004103 tcon = tlink_tcon(open_file->tlink);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004104 server = cifs_pick_channel(tcon->ses);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004105
4106 if (!server->ops->sync_read) {
4107 free_xid(xid);
4108 return -ENOSYS;
4109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004111 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4112 pid = open_file->pid;
4113 else
4114 pid = current->tgid;
4115
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05004117 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004119 for (total_read = 0, cur_offset = read_data; read_size > total_read;
4120 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04004121 do {
4122 current_read_size = min_t(uint, read_size - total_read,
4123 rsize);
4124 /*
 4125 * For Windows ME and 9x we do not want to request more
 4126 * than was negotiated since the server will refuse the
 4127 * read otherwise.
4128 */
Steve French9bd21d42020-05-13 10:27:16 -05004129 if (!(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004130 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04004131 current_read_size = min_t(uint,
4132 current_read_size, CIFSMaxBufSize);
4133 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00004134 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04004135 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136 if (rc != 0)
4137 break;
4138 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004139 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004140 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004141 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004142 io_parms.length = current_read_size;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004143 io_parms.server = server;
Steve Frenchdb8b6312014-09-22 05:13:55 -05004144 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004145 &bytes_read, &cur_offset,
4146 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04004147 } while (rc == -EAGAIN);
4148
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149 if (rc || (bytes_read == 0)) {
4150 if (total_read) {
4151 break;
4152 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004153 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154 return rc;
4155 }
4156 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04004157 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07004158 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159 }
4160 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004161 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 return total_read;
4163}
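
/*
 * Note: cifs_read() is the legacy synchronous path (used by readpage
 * below); it issues rsize-bounded reads back to back, retrying each
 * chunk on -EAGAIN, and returns the bytes read so far if a later
 * chunk fails.
 */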
4164
Jeff Laytonca83ce32011-04-12 09:13:44 -04004165/*
4166 * If the page is mmap'ed into a process' page tables, then we need to make
4167 * sure that it doesn't change while being written back.
4168 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05304169static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08004170cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004171{
4172 struct page *page = vmf->page;
4173
4174 lock_page(page);
4175 return VM_FAULT_LOCKED;
4176}
4177
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07004178static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04004179 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07004180 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04004181 .page_mkwrite = cifs_page_mkwrite,
4182};
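
/*
 * A minimal page_mkwrite is sufficient here: returning VM_FAULT_LOCKED
 * with the page held keeps the page stable during the fault, since
 * writeback must also take the page lock before writing it out.
 */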
4183
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004184int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4185{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004186 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05004187 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004188
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004189 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004190
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004191 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004192 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004193 if (!rc)
4194 rc = generic_file_mmap(file, vma);
4195 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004196 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004197
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004198 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03004199 return rc;
4200}
4201
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4203{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204 int rc, xid;
4205
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004206 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004207
Jeff Laytonabab0952010-02-12 07:44:18 -05004208 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004209 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004210 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4211 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004212 if (!rc)
4213 rc = generic_file_mmap(file, vma);
4214 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04004215 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08004216
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004217 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 return rc;
4219}
4220
Jeff Layton0471ca32012-05-16 07:13:16 -04004221static void
4222cifs_readv_complete(struct work_struct *work)
4223{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004224 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04004225 struct cifs_readdata *rdata = container_of(work,
4226 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04004227
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004228 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004229 for (i = 0; i < rdata->nr_pages; i++) {
4230 struct page *page = rdata->pages[i];
4231
Johannes Weiner6058eae2020-06-03 16:02:40 -07004232 lru_cache_add(page);
Jeff Layton0471ca32012-05-16 07:13:16 -04004233
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004234 if (rdata->result == 0 ||
4235 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04004236 flush_dcache_page(page);
4237 SetPageUptodate(page);
4238 }
4239
4240 unlock_page(page);
4241
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004242 if (rdata->result == 0 ||
4243 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04004244 cifs_readpage_to_fscache(rdata->mapping->host, page);
4245
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004246 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04004247
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004248 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004249 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04004250 }
Jeff Layton6993f742012-05-16 07:13:17 -04004251 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04004252}
4253
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004254static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004255readpages_fill_pages(struct TCP_Server_Info *server,
4256 struct cifs_readdata *rdata, struct iov_iter *iter,
4257 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004258{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004259 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004260 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004261 u64 eof;
4262 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004263 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07004264 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004265
4266 /* determine the eof that the server (probably) has */
4267 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004268 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05004269 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004270
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004271 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004272 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004273 for (i = 0; i < nr_pages; i++) {
4274 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07004275 unsigned int to_read = rdata->pagesz;
4276 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004277
Long Li1dbe3462018-05-30 12:47:55 -07004278 if (i == 0)
4279 to_read -= page_offset;
4280 else
4281 page_offset = 0;
4282
4283 n = to_read;
4284
4285 if (len >= to_read) {
4286 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07004287 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004288 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07004289 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05004290 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07004291 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004292 } else if (page->index > eof_index) {
4293 /*
4294 * The VFS will not try to do readahead past the
4295 * i_size, but it's possible that we have outstanding
4296 * writes with gaps in the middle and the i_size hasn't
4297 * caught up yet. Populate those with zeroed out pages
4298 * to prevent the VFS from repeatedly attempting to
4299 * fill them until the writes are flushed.
4300 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004301 zero_user(page, 0, PAGE_SIZE);
Johannes Weiner6058eae2020-06-03 16:02:40 -07004302 lru_cache_add(page);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004303 flush_dcache_page(page);
4304 SetPageUptodate(page);
4305 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004306 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004307 rdata->pages[i] = NULL;
4308 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004309 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004310 } else {
4311 /* no need to hold page hostage */
Johannes Weiner6058eae2020-06-03 16:02:40 -07004312 lru_cache_add(page);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004313 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004314 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004315 rdata->pages[i] = NULL;
4316 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07004317 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004318 }
Jeff Layton8321fec2012-09-19 06:22:32 -07004319
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004320 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07004321 result = copy_page_from_iter(
4322 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07004323#ifdef CONFIG_CIFS_SMB_DIRECT
4324 else if (rdata->mr)
4325 result = n;
4326#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004327 else
Long Li1dbe3462018-05-30 12:47:55 -07004328 result = cifs_read_page_from_socket(
4329 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07004330 if (result < 0)
4331 break;
4332
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004333 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004334 }
4335
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04004336 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4337 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04004338}
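
/*
 * Summary of the cases above: each page either receives up to to_read
 * bytes from the iterator/socket, is partially filled with the rest
 * zeroed (recording tailsz), is wholly zero-filled because it lies
 * past the server's probable EOF, or is released untouched once len
 * is exhausted.
 */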
4339
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004340static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004341cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4342 struct cifs_readdata *rdata, unsigned int len)
4343{
4344 return readpages_fill_pages(server, rdata, NULL, len);
4345}
4346
4347static int
4348cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4349 struct cifs_readdata *rdata,
4350 struct iov_iter *iter)
4351{
4352 return readpages_fill_pages(server, rdata, iter, iter->count);
4353}
4354
4355static int
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004356readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
4357 unsigned int rsize, struct list_head *tmplist,
4358 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
4359{
4360 struct page *page, *tpage;
4361 unsigned int expected_index;
4362 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07004363 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004364
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004365 INIT_LIST_HEAD(tmplist);
4366
Nikolay Borisovf86196e2019-01-03 15:29:02 -08004367 page = lru_to_page(page_list);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004368
4369 /*
4370 * Lock the page and put it in the cache. Since no one else
4371 * should have access to this page, we're safe to simply set
4372 * PG_locked without checking it first.
4373 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004374 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004375 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07004376 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004377
4378 /* give up if we can't stick it in the cache */
4379 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004380 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004381 return rc;
4382 }
4383
4384 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004385 *offset = (loff_t)page->index << PAGE_SHIFT;
4386 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004387 *nr_pages = 1;
4388 list_move_tail(&page->lru, tmplist);
4389
4390 /* now try and add more pages onto the request */
4391 expected_index = page->index + 1;
4392 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4393 /* discontinuity ? */
4394 if (page->index != expected_index)
4395 break;
4396
4397 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004398 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004399 break;
4400
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004401 __SetPageLocked(page);
Zhang Xiaoxu95a3d8f2020-06-22 05:30:19 -04004402 rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
4403 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004404 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004405 break;
4406 }
4407 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004408 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004409 expected_index++;
4410 (*nr_pages)++;
4411 }
4412 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413}
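
/*
 * Batching sketch: readpages_get_pages() peels consecutive pages off
 * page_list, stopping at an index gap or when the batch would exceed
 * rsize, so that each cifs_readdata describes one contiguous wire
 * read.
 */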
4414
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415static int cifs_readpages(struct file *file, struct address_space *mapping,
4416 struct list_head *page_list, unsigned num_pages)
4417{
Jeff Layton690c5e32011-10-19 15:30:16 -04004418 int rc;
Zhang Xiaoxu95a3d8f2020-06-22 05:30:19 -04004419 int err = 0;
Jeff Layton690c5e32011-10-19 15:30:16 -04004420 struct list_head tmplist;
4421 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04004422 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004423 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004424 pid_t pid;
Steve French0cb012d2018-10-11 01:01:02 -05004425 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426
Steve French0cb012d2018-10-11 01:01:02 -05004427 xid = get_xid();
Jeff Layton690c5e32011-10-19 15:30:16 -04004428 /*
Suresh Jayaraman566982362010-07-05 18:13:25 +05304429 * Reads as many pages as possible from fscache. Returns -ENOBUFS
 4430 * immediately if the cookie is negative.
David Howells54afa992013-09-04 17:10:39 +00004431 *
4432 * After this point, every page in the list might have PG_fscache set,
 4433 * so we will need to clear that bit on every page we don't use.
Suresh Jayaraman566982362010-07-05 18:13:25 +05304434 */
4435 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4436 &num_pages);
Steve French0cb012d2018-10-11 01:01:02 -05004437 if (rc == 0) {
4438 free_xid(xid);
Jeff Layton690c5e32011-10-19 15:30:16 -04004439 return rc;
Steve French0cb012d2018-10-11 01:01:02 -05004440 }
Suresh Jayaraman566982362010-07-05 18:13:25 +05304441
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004442 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4443 pid = open_file->pid;
4444 else
4445 pid = current->tgid;
4446
Jeff Layton690c5e32011-10-19 15:30:16 -04004447 rc = 0;
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004448 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449
Joe Perchesf96637b2013-05-04 22:12:25 -05004450 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4451 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04004452
4453 /*
 4454 * Start with the page at the end of the list and move it to a
 4455 * private list. Do the same with any following pages until we hit
 4456 * the rsize limit, hit an index discontinuity, or run out of
 4457 * pages. Issue the async read and then start the loop again
 4458 * until the list is empty.
 4459 *
 4460 * Note that list order is important. The page_list is in
 4461 * the order of declining indexes. When we put the pages in
 4462 * rdata->pages, we want them in increasing order.
4463 */
Zhang Xiaoxu95a3d8f2020-06-22 05:30:19 -04004464 while (!list_empty(page_list) && !err) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004465 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04004466 loff_t offset;
4467 struct page *page, *tpage;
4468 struct cifs_readdata *rdata;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004469 struct cifs_credits credits_on_stack;
4470 struct cifs_credits *credits = &credits_on_stack;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004472 if (open_file->invalidHandle) {
4473 rc = cifs_reopen_file(open_file, true);
4474 if (rc == -EAGAIN)
4475 continue;
4476 else if (rc)
4477 break;
4478 }
4479
Ronnie Sahlberg522aa3b2020-12-14 16:40:17 +10004480 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004481 &rsize, credits);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004482 if (rc)
4483 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484
Jeff Layton690c5e32011-10-19 15:30:16 -04004485 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004486 * Give up immediately if rsize is too small to read an entire
 4487 * page. The VFS will fall back to readpage. However, we should
 4488 * never reach this point since we set ra_pages to 0 when the
4489 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04004490 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004491 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004492 add_credits_and_wake_if(server, credits, 0);
Steve French0cb012d2018-10-11 01:01:02 -05004493 free_xid(xid);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004494 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496
Zhang Xiaoxu95a3d8f2020-06-22 05:30:19 -04004497 nr_pages = 0;
4498 err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004499 &nr_pages, &offset, &bytes);
Zhang Xiaoxu95a3d8f2020-06-22 05:30:19 -04004500 if (!nr_pages) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004501 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04004503 }
4504
Jeff Layton0471ca32012-05-16 07:13:16 -04004505 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04004506 if (!rdata) {
4507 /* best to give up if we're out of mem */
4508 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4509 list_del(&page->lru);
Johannes Weiner6058eae2020-06-03 16:02:40 -07004510 lru_cache_add(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04004511 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004512 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04004513 }
4514 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004515 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04004516 break;
4517 }
4518
Jeff Layton6993f742012-05-16 07:13:17 -04004519 rdata->cfile = cifsFileInfo_get(open_file);
Aurelien Aptel352d96f2020-05-31 12:38:22 -05004520 rdata->server = server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004521 rdata->mapping = mapping;
4522 rdata->offset = offset;
4523 rdata->bytes = bytes;
4524 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004525 rdata->pagesz = PAGE_SIZE;
Long Li1dbe3462018-05-30 12:47:55 -07004526 rdata->tailsz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07004527 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004528 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004529 rdata->credits = credits_on_stack;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004530
4531 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4532 list_del(&page->lru);
4533 rdata->pages[rdata->nr_pages++] = page;
4534 }
Jeff Layton690c5e32011-10-19 15:30:16 -04004535
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004536 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4537
4538 if (!rc) {
4539 if (rdata->cfile->invalidHandle)
Pavel Shilovsky3e952992019-01-25 11:59:01 -08004540 rc = -EAGAIN;
4541 else
Pavel Shilovsky9a1c67e2019-01-23 18:15:52 -08004542 rc = server->ops->async_readv(rdata);
4543 }
4544
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004545 if (rc) {
Pavel Shilovsky335b7b62019-01-16 11:12:41 -08004546 add_credits_and_wake_if(server, &rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004547 for (i = 0; i < rdata->nr_pages; i++) {
4548 page = rdata->pages[i];
Johannes Weiner6058eae2020-06-03 16:02:40 -07004549 lru_cache_add(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04004550 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004551 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04004553 /* Fallback to the readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04004554 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555 break;
4556 }
Jeff Layton6993f742012-05-16 07:13:17 -04004557
4558 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559 }
4560
David Howells54afa992013-09-04 17:10:39 +00004561 /* Any pages that have been shown to fscache but didn't get added to
4562 * the pagecache must be uncached before they get returned to the
4563 * allocator.
4564 */
4565 cifs_fscache_readpages_cancel(mapping->host, page_list);
Steve French0cb012d2018-10-11 01:01:02 -05004566 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004567 return rc;
4568}
4569
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01004570/*
4571 * cifs_readpage_worker must be called with the page pinned
4572 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573static int cifs_readpage_worker(struct file *file, struct page *page,
4574 loff_t *poffset)
4575{
4576 char *read_data;
4577 int rc;
4578
Suresh Jayaraman566982362010-07-05 18:13:25 +05304579 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05004580 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304581 if (rc == 0)
4582 goto read_complete;
4583
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584 read_data = kmap(page);
 4585 /* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004586
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004587 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004588
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589 if (rc < 0)
4590 goto io_error;
4591 else
Joe Perchesf96637b2013-05-04 22:12:25 -05004592 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004593
Steve French9b9c5be2018-09-22 12:07:06 -05004594 /* we do not want atime to be less than mtime, it broke some apps */
4595 file_inode(file)->i_atime = current_time(file_inode(file));
 4596 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)) < 0)
4597 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4598 else
4599 file_inode(file)->i_atime = current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004600
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004601 if (PAGE_SIZE > rc)
4602 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603
4604 flush_dcache_page(page);
4605 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304606
4607 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05004608 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304609
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004611
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004613 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004614 unlock_page(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304615
4616read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617 return rc;
4618}
4619
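/*
 * ->readpage handler: validates that we still have an open file handle
 * and then hands the actual read off to cifs_readpage_worker() above.
 */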
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	free_xid(xid);
	return rc;
}

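/*
 * Returns 1 if at least one handle open on this inode has write access,
 * 0 otherwise. Walks the inode's open file list under open_file_lock.
 */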
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_inode->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return 0;
}

/* We do not want to update the file size from the server for inodes
   open for write - to avoid races with writepage extending the file.
   In the future we could consider allowing refreshing the inode only
   on increases in the file size, but this is tricky to do without
   racing with writebehind page caching in the current Linux kernel
   design */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

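/*
 * ->write_begin handler: grab and lock the target page and, where the
 * write is short and the page is not already up to date, try to prefill
 * it (by zeroing under an oplock, or by reading it in) so that
 * cifs_write_end() can avoid falling back to a synchronous write.
 */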
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent a close of that handle
		   racing with this read? In any case this will be written
		   out by write_end so it is fine */
	}
out:
	*pagep = page;
	return rc;
}

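/*
 * ->releasepage refuses to release pages that still carry private data;
 * everything else is passed to fscache, which decides whether its own
 * state allows the page to be freed. ->invalidatepage only needs to
 * uncache the page when the whole page is being invalidated.
 */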
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

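/*
 * Worker for handling an oplock break from the server: waits out pending
 * writers, downgrades the cached oplock state, flushes (and, if required,
 * purges) the page cache, pushes byte-range locks back to the server, and
 * acknowledges the break unless it was cancelled by a reconnect.
 */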
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	bool purge_cache = false;
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
				      cfile->oplock_epoch, &purge_cache);

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		if (CIFS_CACHE_WRITE(cinode))
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * Releasing a stale oplock after a recent reconnect of the smb
	 * session, using a now incorrect file handle, is not a data integrity
	 * issue, but do not bother sending an oplock release if the session
	 * to the server is still disconnected since the oplock has already
	 * been released by the server.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	/*
	 * When an oplock break is received and there are no active file
	 * handles, only a cached one, schedule the deferred close immediately
	 * so that a new open will not reuse the cached handle.
	 */
	spin_lock(&CIFS_I(inode)->deferred_lock);
	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	if (is_deferred &&
	    cfile->deferred_close_scheduled &&
	    delayed_work_pending(&cfile->deferred)) {
		/*
		 * If there is no pending work, mod_delayed_work queues new
		 * work, so increase the ref count to avoid use-after-free.
		 */
		if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
			cifsFileInfo_get(cfile);
	}
	spin_unlock(&CIFS_I(inode)->deferred_lock);
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	cifs_done_oplock_break(cinode);
}

/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct read
 * and write requests, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}

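/*
 * Allow a file on this mount to be used as swap space. The check below is
 * conservative: i_blocks is counted in 512-byte units, so a byte size
 * larger than blocks * 512 indicates the swapfile has holes, which swap
 * cannot tolerate.
 */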
static int cifs_swap_activate(struct swap_info_struct *sis,
			      struct file *swap_file, sector_t *span)
{
	struct cifsFileInfo *cfile = swap_file->private_data;
	struct inode *inode = swap_file->f_mapping->host;
	unsigned long blocks;
	long long isize;

	cifs_dbg(FYI, "swap activate\n");

	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	if (blocks * 512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}
	*span = sis->pages;

	pr_warn_once("Swap support over SMB3 is experimental\n");

	/*
	 * TODO: consider adding ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
	 */

	/* TODO: add sk_set_memalloc(inet) or similar */

	if (cfile)
		cfile->swapfile = true;
	/*
	 * TODO: Since file already open, we can't open with DENY_ALL here
	 * but we could add call to grab a byte range lock to prevent others
	 * from reading or writing the file
	 */

	return 0;
}

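/*
 * Undo cifs_swap_activate(): clear the swapfile flag on the open file.
 * The remaining TODOs mirror the ones in the activate path above.
 */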
static void cifs_swap_deactivate(struct file *file)
{
	struct cifsFileInfo *cfile = file->private_data;

	cifs_dbg(FYI, "swap deactivate\n");

	/* TODO: undo sk_set_memalloc(inet) will eventually be needed */

	if (cfile)
		cfile->swapfile = false;

	/* do we need to unpin (or unlock) the file? */
}

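/*
 * Address space operations for the normal case, where the server supports
 * a buffer large enough for the header plus a full page of data; see
 * cifs_addr_ops_smallbuf below for the fallback without ->readpages.
 */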
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
	/*
	 * TODO: investigate and if useful we could add a cifs_migratePage
	 * helper (under CONFIG_MIGRATION) in the future, and also
	 * investigate and add an is_dirty_writeback helper if needed
	 */
	.swap_activate = cifs_swap_activate,
	.swap_deactivate = cifs_swap_deactivate,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};