/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

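/*
 * Convert POSIX open flags into the NT desired access bits requested on the
 * wire. For O_RDWR we deliberately ask for read and write explicitly rather
 * than GENERIC_ALL; any other access mode falls back to an explicit set of
 * file rights.
 */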
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

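/*
 * Map the POSIX create/truncate flag combinations onto the CIFS create
 * disposition sent to the server (see the mapping table in cifs_nt_open).
 */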
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

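/*
 * Open a file using the CIFS POSIX protocol extensions. On success *poplock
 * and *pnetfid describe the new handle and, when pinode is supplied, the
 * inode is looked up or refreshed from the returned UNIX attributes.
 */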
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

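/*
 * Open a file the traditional (non-POSIX) way through the server's ->open
 * operation and then refresh the inode from the metadata returned.
 */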
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar but truncates an existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock,
			       buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

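/*
 * Build the per-open cifsFileInfo: allocate it together with its byte-range
 * lock list, apply any oplock that arrived while the open was pending, and
 * link the new handle into the tcon and inode open-file lists.
 */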
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	spin_lock(&cifs_file_list_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

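/*
 * Take an extra reference on an open file handle. The matching release is
 * done by cifsFileInfo_put below.
 */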
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * especially in the reconnect path it is important to retry
		 * hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

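/*
 * Drop the reference taken at open time; the actual close of the server
 * handle, if this was the last reference, happens in cifsFileInfo_put.
 */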
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

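/* Close a directory handle opened for readdir and free its search buffers. */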
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

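/*
 * Allocate a byte-range lock record for the given range and type, tagged
 * with the current process (tgid) as the lock owner.
 */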
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

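/* Wake up all threads blocked waiting on the given lock. */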
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

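/*
 * Look for a lock on this file handle that overlaps the requested range and
 * is incompatible with the requested type. A shared request does not
 * conflict with the caller's own locks (same handle and tgid) or with other
 * locks of the same type; with rw_check set, the caller's own locks are
 * always skipped.
 */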
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, bool rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
		    current->tgid == li->pid)
			continue;
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

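/* Check every open handle's lock list on this inode for a conflict. */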
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			bool rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, false);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, false);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

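/*
 * Push all cached byte-range locks for this handle out to the server,
 * batched into LOCKING_ANDX_RANGE arrays sized to the negotiated buffer,
 * and turn off brlock caching for the inode.
 */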
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/* we are going to update can_cache_brlcks here - need write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

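/* Snapshot of one POSIX lock to be sent to the server. */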
struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

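/*
 * Push cached POSIX byte-range locks out to the server. The caller must
 * hold lock_sem; the lock count is sampled first so that the lock_to_push
 * structures can be allocated outside lock_flocks().
 */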
static int
cifs_push_posix_locks_locked(struct cifsFileInfo *cfile)
{
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

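/*
 * Take lock_sem for writing and push the cached POSIX locks, clearing
 * can_cache_brlcks so that later requests go straight to the server.
 */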
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}
	rc = cifs_push_posix_locks_locked(cfile);
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

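/*
 * Push cached locks using the POSIX extensions when the server and mount
 * options allow it, otherwise fall back to mandatory-style locks.
 */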
1149static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001150cifs_push_locks(struct cifsFileInfo *cfile)
1151{
1152 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1153 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1154
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001155 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001156 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1157 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1158 return cifs_push_posix_locks(cfile);
1159
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001160 return tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001161}
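/*
 * Illustrative sketch, not part of the original file: the three-part
 * capability test above is repeated verbatim in cifs_lock() further down,
 * so a hypothetical helper could consolidate it.  The helper name is an
 * assumption, not an existing kernel symbol.
 */
static inline bool
cifs_posix_brlocks_usable(struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb)
{
	return cap_unix(tcon->ses) &&
	       (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	       ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0);
}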
1162
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001163static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001164cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001165 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001167 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001168 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001169 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001170 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001171 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001172 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001173 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001175 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001176 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001177 "not implemented yet");
1178 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001179 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001180 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001181 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1182 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001183 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001185 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001186 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001187 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001188 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001189 *lock = 1;
1190 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001191 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001192 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001193 *unlock = 1;
1194 /* Check if unlock includes more than one lock range */
1195 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001196 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001197 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001198 *lock = 1;
1199 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001200 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001201 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001202 *lock = 1;
1203 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001204 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001205 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001206 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001208 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001209}
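/*
 * Worked example (illustrative): a blocking POSIX write-lock request
 * arrives with fl_flags = FL_POSIX | FL_SLEEP and fl_type = F_WRLCK.
 * cifs_read_flock() then sets *wait_flag = true, *lock = 1, and
 * *type = large_lock_type | exclusive_lock_type from server->vals.
 */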
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001211static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001212cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001213 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001214{
1215 int rc = 0;
1216 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001217 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1218 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001219 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001220 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001222 if (posix_lck) {
1223 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001224
1225 rc = cifs_posix_lock_test(file, flock);
1226 if (!rc)
1227 return rc;
1228
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001229 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001230 posix_lock_type = CIFS_RDLCK;
1231 else
1232 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001233 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001234 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001235 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 return rc;
1237 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001238
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001239 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001240 if (!rc)
1241 return rc;
1242
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001243 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001244 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1245 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001246 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001247 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1248 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001249 flock->fl_type = F_UNLCK;
1250 if (rc != 0)
1251 cERROR(1, "Error unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001252 "range %d during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001253 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001254 }
1255
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001256 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001257 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001258 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001259 }
1260
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001261 type &= ~server->vals->exclusive_lock_type;
1262
1263 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1264 type | server->vals->shared_lock_type,
1265 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001266 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001267 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1268 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001269 flock->fl_type = F_RDLCK;
1270 if (rc != 0)
1271 cERROR(1, "Error unlocking previously locked "
1272 "range %d during test of lock", rc);
1273 } else
1274 flock->fl_type = F_WRLCK;
1275
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001276 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001277}
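/*
 * Summary of the probe logic above (descriptive only): if the exclusive
 * probe lock is granted (rc == 0), it is immediately undone and the range
 * is reported free (fl_type = F_UNLCK).  If the caller asked about a
 * shared lock, a failed exclusive probe is enough to report F_WRLCK.
 * Otherwise a second, shared probe distinguishes a range held shared
 * (probe succeeds, fl_type = F_RDLCK) from one held exclusively
 * (fl_type = F_WRLCK).
 */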
1278
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001279void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001280cifs_move_llist(struct list_head *source, struct list_head *dest)
1281{
1282 struct list_head *li, *tmp;
1283 list_for_each_safe(li, tmp, source)
1284 list_move(li, dest);
1285}
1286
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001287void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001288cifs_free_llist(struct list_head *llist)
1289{
1290 struct cifsLockInfo *li, *tmp;
1291 list_for_each_entry_safe(li, tmp, llist, llist) {
1292 cifs_del_lock_waiters(li);
1293 list_del(&li->llist);
1294 kfree(li);
1295 }
1296}
1297
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001298int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001299cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1300 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001301{
1302 int rc = 0, stored_rc;
1303 int types[] = {LOCKING_ANDX_LARGE_FILES,
1304 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1305 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001306 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001307 LOCKING_ANDX_RANGE *buf, *cur;
1308 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1309 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1310 struct cifsLockInfo *li, *tmp;
1311 __u64 length = 1 + flock->fl_end - flock->fl_start;
1312 struct list_head tmp_llist;
1313
1314 INIT_LIST_HEAD(&tmp_llist);
1315
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001316 /*
1317 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1318 * and check it for zero before using.
1319 */
1320 max_buf = tcon->ses->server->maxBuf;
1321 if (!max_buf)
1322 return -EINVAL;
1323
1324 max_num = (max_buf - sizeof(struct smb_hdr)) /
1325 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001326 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1327 if (!buf)
1328 return -ENOMEM;
1329
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001330 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001331 for (i = 0; i < 2; i++) {
1332 cur = buf;
1333 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001334 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001335 if (flock->fl_start > li->offset ||
1336 (flock->fl_start + length) <
1337 (li->offset + li->length))
1338 continue;
1339 if (current->tgid != li->pid)
1340 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001341 if (types[i] != li->type)
1342 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001343 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001344 /*
1345 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001346 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001347 */
1348 list_del(&li->llist);
1349 cifs_del_lock_waiters(li);
1350 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001351 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001352 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001353 cur->Pid = cpu_to_le16(li->pid);
1354 cur->LengthLow = cpu_to_le32((u32)li->length);
1355 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1356 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1357 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1358 /*
1359 * We need to save a lock here to let us add it again to
1360 * the file's list if the unlock range request fails on
1361 * the server.
1362 */
1363 list_move(&li->llist, &tmp_llist);
1364 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001365 stored_rc = cifs_lockv(xid, tcon,
1366 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001367 li->type, num, 0, buf);
1368 if (stored_rc) {
1369 /*
1370 * We failed on the unlock range
1371 * request - add all locks from the tmp
1372 * list to the head of the file's list.
1373 */
1374 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001375 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001376 rc = stored_rc;
1377 } else
1378 /*
1379				 * The unlock range request succeeded -
1380 * free the tmp list.
1381 */
1382 cifs_free_llist(&tmp_llist);
1383 cur = buf;
1384 num = 0;
1385 } else
1386 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001387 }
1388 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001389 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001390 types[i], num, 0, buf);
1391 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001392 cifs_move_llist(&tmp_llist,
1393 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001394 rc = stored_rc;
1395 } else
1396 cifs_free_llist(&tmp_llist);
1397 }
1398 }
1399
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001400 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001401 kfree(buf);
1402 return rc;
1403}
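/*
 * Note on the two-pass loop above: types[0] matches exclusive locks
 * (LOCKING_ANDX_LARGE_FILES alone) and types[1] matches shared ones
 * (LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES), so every
 * cifs_lockv() call carries ranges of a single lock type, batched up
 * to max_num LOCKING_ANDX_RANGE entries per request.
 */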
1404
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001405static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001406cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001407 bool wait_flag, bool posix_lck, int lock, int unlock,
1408 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001409{
1410 int rc = 0;
1411 __u64 length = 1 + flock->fl_end - flock->fl_start;
1412 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1413 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001414 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001415
1416 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001417 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001418
1419 rc = cifs_posix_lock_set(file, flock);
1420 if (!rc || rc < 0)
1421 return rc;
1422
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001423 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001424 posix_lock_type = CIFS_RDLCK;
1425 else
1426 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001427
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001428 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001429 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001430
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001431 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1432 current->tgid, flock->fl_start, length,
1433 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001434 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001435 }
1436
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001437 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001438 struct cifsLockInfo *lock;
1439
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001440 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001441 if (!lock)
1442 return -ENOMEM;
1443
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001444 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001445 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001446 kfree(lock);
1447 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001448 goto out;
1449
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001450 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1451 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001452 if (rc) {
1453 kfree(lock);
1454 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001455 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001456
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001457 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001458 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001459 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001460
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001461out:
1462 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001463 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001464 return rc;
1465}
1466
1467int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1468{
1469 int rc, xid;
1470 int lock = 0, unlock = 0;
1471 bool wait_flag = false;
1472 bool posix_lck = false;
1473 struct cifs_sb_info *cifs_sb;
1474 struct cifs_tcon *tcon;
1475 struct cifsInodeInfo *cinode;
1476 struct cifsFileInfo *cfile;
1477 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001478 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001479
1480 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001481 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001482
1483 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1484 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1485 flock->fl_start, flock->fl_end);
1486
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001487 cfile = (struct cifsFileInfo *)file->private_data;
1488 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001489
1490 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1491 tcon->ses->server);
1492
1493 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001494 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001495 cinode = CIFS_I(file->f_path.dentry->d_inode);
1496
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001497 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001498 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1499 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1500 posix_lck = true;
1501 /*
1502 * BB add code here to normalize offset and length to account for
1503	 * negative length, which we cannot accept over the wire.
1504 */
1505 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001506 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001507 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001508 return rc;
1509 }
1510
1511 if (!lock && !unlock) {
1512 /*
1513		 * if this is neither a lock nor an unlock request, there is
1514		 * nothing to do since we do not know how to handle it
1515 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001516 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001517 return -EOPNOTSUPP;
1518 }
1519
1520 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1521 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001522 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 return rc;
1524}
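/*
 * Hedged usage sketch (userspace view, not part of this file): a blocking
 * byte-range lock such as
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 4096 };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * reaches cifs_lock() with FL_POSIX and FL_SLEEP set, so wait_flag is
 * true and the request takes the cifs_setlk() path above.
 */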
1525
Jeff Layton597b0272012-03-23 14:40:56 -04001526/*
1527 * update the file size (if needed) after a write. Should be called with
1528 * the inode->i_lock held
1529 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001530void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001531cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1532 unsigned int bytes_written)
1533{
1534 loff_t end_of_write = offset + bytes_written;
1535
1536 if (end_of_write > cifsi->server_eof)
1537 cifsi->server_eof = end_of_write;
1538}
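/*
 * Example (illustrative): a 512-byte write at offset 4096 gives
 * end_of_write = 4608; server_eof is only ever raised, so a racing
 * writer that already pushed it past 4608 is left untouched.
 */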
1539
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001540static ssize_t
1541cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1542 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543{
1544 int rc = 0;
1545 unsigned int bytes_written = 0;
1546 unsigned int total_written;
1547 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001548 struct cifs_tcon *tcon;
1549 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001550 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001551 struct dentry *dentry = open_file->dentry;
1552 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001553 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
Jeff Layton7da4b492010-10-15 15:34:00 -04001555 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556
Joe Perchesb6b38f72010-04-21 03:50:45 +00001557 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001558 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001560 tcon = tlink_tcon(open_file->tlink);
1561 server = tcon->ses->server;
1562
1563 if (!server->ops->sync_write)
1564 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001565
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001566 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 for (total_written = 0; write_size > total_written;
1569 total_written += bytes_written) {
1570 rc = -EAGAIN;
1571 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001572 struct kvec iov[2];
1573 unsigned int len;
1574
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576			if (open_file->invalidHandle) {
				/* we could deadlock if we called
1577				   filemap_fdatawait from here, so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001578				   cifs_reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579				   the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001580 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 if (rc != 0)
1582 break;
1583 }
Steve French3e844692005-10-03 13:37:24 -07001584
Jeff Laytonca83ce32011-04-12 09:13:44 -04001585 len = min((size_t)cifs_sb->wsize,
1586 write_size - total_written);
1587 /* iov[0] is reserved for smb header */
1588 iov[1].iov_base = (char *)write_data + total_written;
1589 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001590 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001591 io_parms.tcon = tcon;
1592 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001593 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001594 rc = server->ops->sync_write(xid, open_file, &io_parms,
1595 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 }
1597 if (rc || (bytes_written == 0)) {
1598 if (total_written)
1599 break;
1600 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001601 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 return rc;
1603 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001604 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001605 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001606 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001607 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001608 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001609 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 }
1611
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001612 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613
Jeff Layton7da4b492010-10-15 15:34:00 -04001614 if (total_written > 0) {
1615 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001616 if (*offset > dentry->d_inode->i_size)
1617 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001618 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001620 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001621 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 return total_written;
1623}
1624
Jeff Layton6508d902010-09-29 19:51:11 -04001625struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1626 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001627{
1628 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001629 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1630
1631 /* only filter by fsuid on multiuser mounts */
1632 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1633 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001634
Jeff Layton44772882010-10-15 15:34:03 -04001635 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001636 /* we could simply get the first_list_entry since write-only entries
1637	   are always at the end of the list, but since the first entry might
1638 have a close pending, we go through the whole list */
1639 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001640 if (fsuid_only && open_file->uid != current_fsuid())
1641 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001642 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001643 if (!open_file->invalidHandle) {
1644 /* found a good file */
1645 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001646 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001647 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001648 return open_file;
1649 } /* else might as well continue, and look for
1650 another, or simply have the caller reopen it
1651 again rather than trying to fix this handle */
1652 } else /* write only file */
1653 break; /* write only files are last so must be done */
1654 }
Jeff Layton44772882010-10-15 15:34:03 -04001655 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001656 return NULL;
1657}
Steve French630f3f0c2007-10-25 21:17:17 +00001658
Jeff Layton6508d902010-09-29 19:51:11 -04001659struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1660 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001661{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001662 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001663 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001664 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001665 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001666 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001667
Steve French60808232006-04-22 15:53:05 +00001668 /* Having a null inode here (because mapping->host was set to zero by
1669	   the VFS or MM) should not happen, but we had reports of an oops (due to
1670	   it being zero) during stress test cases, so we need to check for it */
1671
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001672 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001673 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001674 dump_stack();
1675 return NULL;
1676 }
1677
Jeff Laytond3892292010-11-02 16:22:50 -04001678 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1679
Jeff Layton6508d902010-09-29 19:51:11 -04001680 /* only filter by fsuid on multiuser mounts */
1681 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1682 fsuid_only = false;
1683
Jeff Layton44772882010-10-15 15:34:03 -04001684 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001685refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001686 if (refind > MAX_REOPEN_ATT) {
1687 spin_unlock(&cifs_file_list_lock);
1688 return NULL;
1689 }
Steve French6148a742005-10-05 12:23:19 -07001690 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001691 if (!any_available && open_file->pid != current->tgid)
1692 continue;
1693 if (fsuid_only && open_file->uid != current_fsuid())
1694 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001695 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001696 if (!open_file->invalidHandle) {
1697 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001698 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001699 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001700 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001701 } else {
1702 if (!inv_file)
1703 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001704 }
Steve French6148a742005-10-05 12:23:19 -07001705 }
1706 }
Jeff Layton2846d382008-09-22 21:33:33 -04001707	/* couldn't find a usable FH with the same pid, try any available */
1708 if (!any_available) {
1709 any_available = true;
1710 goto refind_writable;
1711 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001712
1713 if (inv_file) {
1714 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001715 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001716 }
1717
Jeff Layton44772882010-10-15 15:34:03 -04001718 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001719
1720 if (inv_file) {
1721 rc = cifs_reopen_file(inv_file, false);
1722 if (!rc)
1723 return inv_file;
1724 else {
1725 spin_lock(&cifs_file_list_lock);
1726 list_move_tail(&inv_file->flist,
1727 &cifs_inode->openFileList);
1728 spin_unlock(&cifs_file_list_lock);
1729 cifsFileInfo_put(inv_file);
1730 spin_lock(&cifs_file_list_lock);
1731 ++refind;
1732 goto refind_writable;
1733 }
1734 }
1735
Steve French6148a742005-10-05 12:23:19 -07001736 return NULL;
1737}
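/*
 * Hedged caller sketch (cifs_partialpagewrite() below shows the real
 * pattern): the returned handle holds a reference and must be released
 * with cifsFileInfo_put() when the caller is done:
 *
 *	open_file = find_writable_file(CIFS_I(inode), false);
 *	if (open_file) {
 *		... issue the write through open_file ...
 *		cifsFileInfo_put(open_file);
 *	}
 */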
1738
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1740{
1741 struct address_space *mapping = page->mapping;
1742 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1743 char *write_data;
1744 int rc = -EFAULT;
1745 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001747 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
1749 if (!mapping || !mapping->host)
1750 return -EFAULT;
1751
1752 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753
1754 offset += (loff_t)from;
1755 write_data = kmap(page);
1756 write_data += from;
1757
1758 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1759 kunmap(page);
1760 return -EIO;
1761 }
1762
1763 /* racing with truncate? */
1764 if (offset > mapping->host->i_size) {
1765 kunmap(page);
1766 return 0; /* don't care */
1767 }
1768
1769 /* check to make sure that we are not extending the file */
1770 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001771 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
Jeff Layton6508d902010-09-29 19:51:11 -04001773 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001774 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001775 bytes_written = cifs_write(open_file, open_file->pid,
1776 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001777 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001779 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001780 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001781 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001782 else if (bytes_written < 0)
1783 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001784 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001785 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 rc = -EIO;
1787 }
1788
1789 kunmap(page);
1790 return rc;
1791}
1792
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001794 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001796 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1797 bool done = false, scanned = false, range_whole = false;
1798 pgoff_t end, index;
1799 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001800 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001801 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001802 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001803
Steve French37c0eb42005-10-05 14:50:29 -07001804 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001805 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001806 * one page at a time via cifs_writepage
1807 */
1808 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1809 return generic_writepages(mapping, wbc);
1810
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001811 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001812 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001813 end = -1;
1814 } else {
1815 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1816 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1817 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001818 range_whole = true;
1819 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001820 }
1821retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001822 while (!done && index <= end) {
1823 unsigned int i, nr_pages, found_pages;
1824 pgoff_t next = 0, tofind;
1825 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001826
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001827 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1828 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001829
Jeff Laytonc2e87642012-03-23 14:40:55 -04001830 wdata = cifs_writedata_alloc((unsigned int)tofind,
1831 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001832 if (!wdata) {
1833 rc = -ENOMEM;
1834 break;
1835 }
1836
1837 /*
1838 * find_get_pages_tag seems to return a max of 256 on each
1839 * iteration, so we must call it several times in order to
1840 * fill the array or the wsize is effectively limited to
1841 * 256 * PAGE_CACHE_SIZE.
1842 */
1843 found_pages = 0;
1844 pages = wdata->pages;
1845 do {
1846 nr_pages = find_get_pages_tag(mapping, &index,
1847 PAGECACHE_TAG_DIRTY,
1848 tofind, pages);
1849 found_pages += nr_pages;
1850 tofind -= nr_pages;
1851 pages += nr_pages;
1852 } while (nr_pages && tofind && index <= end);
1853
1854 if (found_pages == 0) {
1855 kref_put(&wdata->refcount, cifs_writedata_release);
1856 break;
1857 }
1858
1859 nr_pages = 0;
1860 for (i = 0; i < found_pages; i++) {
1861 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001862 /*
1863 * At this point we hold neither mapping->tree_lock nor
1864 * lock on the page itself: the page may be truncated or
1865 * invalidated (changing page->mapping to NULL), or even
1866 * swizzled back from swapper_space to tmpfs file
1867 * mapping
1868 */
1869
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001870 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001871 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001872 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001873 break;
1874
1875 if (unlikely(page->mapping != mapping)) {
1876 unlock_page(page);
1877 break;
1878 }
1879
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001880 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001881 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001882 unlock_page(page);
1883 break;
1884 }
1885
1886 if (next && (page->index != next)) {
1887 /* Not next consecutive page */
1888 unlock_page(page);
1889 break;
1890 }
1891
1892 if (wbc->sync_mode != WB_SYNC_NONE)
1893 wait_on_page_writeback(page);
1894
1895 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001896 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001897 unlock_page(page);
1898 break;
1899 }
Steve French84d2f072005-10-12 15:32:05 -07001900
Linus Torvaldscb876f42006-12-23 16:19:07 -08001901 /*
1902 * This actually clears the dirty bit in the radix tree.
1903 * See cifs_writepage() for more commentary.
1904 */
1905 set_page_writeback(page);
1906
Jeff Layton3a98b862012-11-26 09:48:41 -05001907 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001908 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001909 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001910 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001911 break;
1912 }
1913
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001914 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001915 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001916 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001917 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001918
1919 /* reset index to refind any pages skipped */
1920 if (nr_pages == 0)
1921 index = wdata->pages[0]->index + 1;
1922
1923 /* put any pages we aren't going to use */
1924 for (i = nr_pages; i < found_pages; i++) {
1925 page_cache_release(wdata->pages[i]);
1926 wdata->pages[i] = NULL;
1927 }
1928
1929 /* nothing to write? */
1930 if (nr_pages == 0) {
1931 kref_put(&wdata->refcount, cifs_writedata_release);
1932 continue;
1933 }
1934
1935 wdata->sync_mode = wbc->sync_mode;
1936 wdata->nr_pages = nr_pages;
1937 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001938 wdata->pagesz = PAGE_CACHE_SIZE;
1939 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05001940 min(i_size_read(mapping->host) -
1941 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07001942 (loff_t)PAGE_CACHE_SIZE);
1943 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1944 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001945
1946 do {
1947 if (wdata->cfile != NULL)
1948 cifsFileInfo_put(wdata->cfile);
1949 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1950 false);
1951 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001952 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001953 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001954 break;
Steve French37c0eb42005-10-05 14:50:29 -07001955 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001956 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001957 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1958 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001959 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001960
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001961 for (i = 0; i < nr_pages; ++i)
1962 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001963
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001964 /* send failure -- clean up the mess */
1965 if (rc != 0) {
1966 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001967 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001968 redirty_page_for_writepage(wbc,
1969 wdata->pages[i]);
1970 else
1971 SetPageError(wdata->pages[i]);
1972 end_page_writeback(wdata->pages[i]);
1973 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001974 }
Jeff Layton941b8532011-01-11 07:24:01 -05001975 if (rc != -EAGAIN)
1976 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001977 }
1978 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001979
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001980 wbc->nr_to_write -= nr_pages;
1981 if (wbc->nr_to_write <= 0)
1982 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001983
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001984 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001985 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001986
Steve French37c0eb42005-10-05 14:50:29 -07001987 if (!scanned && !done) {
1988 /*
1989 * We hit the last page and there is more work to be done: wrap
1990 * back to the start of the file
1991 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001992 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001993 index = 0;
1994 goto retry;
1995 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001996
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001997 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001998 mapping->writeback_index = index;
1999
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 return rc;
2001}
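/*
 * Worked example (hypothetical numbers): with wsize = 57344 and 4K pages,
 * each iteration above gathers tofind = min(57344/4096 - 1, end - index) + 1,
 * i.e. at most 14 dirty pages, into one wdata before handing the batch to
 * server->ops->async_writev().
 */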
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002003static int
2004cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002006 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002007 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002009 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010/* BB add check for wbc flags */
2011 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002012 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00002013 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002014
2015 /*
2016 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2017 *
2018 * A writepage() implementation always needs to do either this,
2019 * or re-dirty the page with "redirty_page_for_writepage()" in
2020 * the case of a failure.
2021 *
2022 * Just unlocking the page will cause the radix tree tag-bits
2023 * to fail to update with the state of the page correctly.
2024 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002025 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002026retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002028 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2029 goto retry_write;
2030 else if (rc == -EAGAIN)
2031 redirty_page_for_writepage(wbc, page);
2032 else if (rc != 0)
2033 SetPageError(page);
2034 else
2035 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002036 end_page_writeback(page);
2037 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002038 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 return rc;
2040}
2041
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002042static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2043{
2044 int rc = cifs_writepage_locked(page, wbc);
2045 unlock_page(page);
2046 return rc;
2047}
2048
Nick Piggind9414772008-09-24 11:32:59 -04002049static int cifs_write_end(struct file *file, struct address_space *mapping,
2050 loff_t pos, unsigned len, unsigned copied,
2051 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052{
Nick Piggind9414772008-09-24 11:32:59 -04002053 int rc;
2054 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002055 struct cifsFileInfo *cfile = file->private_data;
2056 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2057 __u32 pid;
2058
2059 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2060 pid = cfile->pid;
2061 else
2062 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063
Joe Perchesb6b38f72010-04-21 03:50:45 +00002064 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2065 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002066
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002067 if (PageChecked(page)) {
2068 if (copied == len)
2069 SetPageUptodate(page);
2070 ClearPageChecked(page);
2071 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002072 SetPageUptodate(page);
2073
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002075 char *page_data;
2076 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002077 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002078
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002079 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080		/* this is probably better than directly calling
2081		   partialpage_write, since in this function the file handle
2082		   is known and we might as well leverage it */
2083		/* BB check if anything else is missing from ppw,
2084		   such as updating the last write time */
2085 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002086 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002087 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002089
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002090 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002091 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002092 rc = copied;
2093 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 set_page_dirty(page);
2095 }
2096
Nick Piggind9414772008-09-24 11:32:59 -04002097 if (rc > 0) {
2098 spin_lock(&inode->i_lock);
2099 if (pos > inode->i_size)
2100 i_size_write(inode, pos);
2101 spin_unlock(&inode->i_lock);
2102 }
2103
2104 unlock_page(page);
2105 page_cache_release(page);
2106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 return rc;
2108}
2109
Josef Bacik02c24a82011-07-16 20:44:56 -04002110int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2111 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002113 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002115 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002116 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002117 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002118 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002119 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
Josef Bacik02c24a82011-07-16 20:44:56 -04002121 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2122 if (rc)
2123 return rc;
2124 mutex_lock(&inode->i_mutex);
2125
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002126 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127
Joe Perchesb6b38f72010-04-21 03:50:45 +00002128 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002129 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002130
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002131 if (!CIFS_I(inode)->clientCanCacheRead) {
2132 rc = cifs_invalidate_mapping(inode);
2133 if (rc) {
2134 cFYI(1, "rc: %d during invalidate phase", rc);
2135 rc = 0; /* don't care about it in fsync */
2136 }
2137 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002138
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002139 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002140 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2141 server = tcon->ses->server;
2142 if (server->ops->flush)
2143 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2144 else
2145 rc = -ENOSYS;
2146 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002147
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002148 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002149 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002150 return rc;
2151}
2152
Josef Bacik02c24a82011-07-16 20:44:56 -04002153int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002154{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002155 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002156 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002157 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002158 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002159 struct cifsFileInfo *smbfile = file->private_data;
2160 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002161 struct inode *inode = file->f_mapping->host;
2162
2163 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2164 if (rc)
2165 return rc;
2166 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002167
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002168 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002169
2170 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2171 file->f_path.dentry->d_name.name, datasync);
2172
2173 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002174 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2175 server = tcon->ses->server;
2176 if (server->ops->flush)
2177 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2178 else
2179 rc = -ENOSYS;
2180 }
Steve Frenchb298f222009-02-21 21:17:43 +00002181
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002182 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002183 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 return rc;
2185}
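/*
 * Note: cifs_fsync() differs from cifs_strict_fsync() above only in
 * skipping the cifs_invalidate_mapping() step taken for inodes without
 * a cached read oplock; which variant a file uses depends on the
 * file_operations table the mount's cache mode selects.
 */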
2186
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187/*
2188 * As file closes, flush all cached write data for this inode checking
2189 * for write behind errors.
2190 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002191int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002193 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 int rc = 0;
2195
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002196 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002197 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002198
Joe Perchesb6b38f72010-04-21 03:50:45 +00002199 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200
2201 return rc;
2202}
2203
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002204static int
2205cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2206{
2207 int rc = 0;
2208 unsigned long i;
2209
2210 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002211 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002212 if (!pages[i]) {
2213 /*
2214 * save number of pages we have already allocated and
2215 * return with ENOMEM error
2216 */
2217 num_pages = i;
2218 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002219 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002220 }
2221 }
2222
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002223 if (rc) {
2224 for (i = 0; i < num_pages; i++)
2225 put_page(pages[i]);
2226 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002227 return rc;
2228}
2229
2230static inline
2231size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2232{
2233 size_t num_pages;
2234 size_t clen;
2235
2236 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002237 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002238
2239 if (cur_len)
2240 *cur_len = clen;
2241
2242 return num_pages;
2243}
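/*
 * Worked example (illustrative, assuming 4K pages): with wsize = 65536
 * and len = 10000, clen = min(10000, 65536) = 10000, so *cur_len becomes
 * 10000 and the function returns DIV_ROUND_UP(10000, 4096) = 3 pages.
 */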
2244
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002245static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002246cifs_uncached_writev_complete(struct work_struct *work)
2247{
2248 int i;
2249 struct cifs_writedata *wdata = container_of(work,
2250 struct cifs_writedata, work);
2251 struct inode *inode = wdata->cfile->dentry->d_inode;
2252 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2253
2254 spin_lock(&inode->i_lock);
2255 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2256 if (cifsi->server_eof > inode->i_size)
2257 i_size_write(inode, cifsi->server_eof);
2258 spin_unlock(&inode->i_lock);
2259
2260 complete(&wdata->done);
2261
2262 if (wdata->result != -EAGAIN) {
2263 for (i = 0; i < wdata->nr_pages; i++)
2264 put_page(wdata->pages[i]);
2265 }
2266
2267 kref_put(&wdata->refcount, cifs_writedata_release);
2268}
2269
2270/* attempt to send write to server, retry on any -EAGAIN errors */
2271static int
2272cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2273{
2274 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002275 struct TCP_Server_Info *server;
2276
2277 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002278
2279 do {
2280 if (wdata->cfile->invalidHandle) {
2281 rc = cifs_reopen_file(wdata->cfile, false);
2282 if (rc != 0)
2283 continue;
2284 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002285 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002286 } while (rc == -EAGAIN);
2287
2288 return rc;
2289}
2290
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002291static ssize_t
2292cifs_iovec_write(struct file *file, const struct iovec *iov,
2293 unsigned long nr_segs, loff_t *poffset)
2294{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002295 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002296 size_t copied, len, cur_len;
2297 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002298 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002299 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002300 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002301 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002302 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002303 struct cifs_writedata *wdata, *tmp;
2304 struct list_head wdata_list;
2305 int rc;
2306 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002307
2308 len = iov_length(iov, nr_segs);
2309 if (!len)
2310 return 0;
2311
2312 rc = generic_write_checks(file, poffset, &len, 0);
2313 if (rc)
2314 return rc;
2315
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002316 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002317 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002318 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002319 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002320
2321 if (!tcon->ses->server->ops->async_writev)
2322 return -ENOSYS;
2323
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002324 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002325
2326 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2327 pid = open_file->pid;
2328 else
2329 pid = current->tgid;
2330
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002331 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002332 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002333 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002334
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002335 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2336 wdata = cifs_writedata_alloc(nr_pages,
2337 cifs_uncached_writev_complete);
2338 if (!wdata) {
2339 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002340 break;
2341 }
2342
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002343 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2344 if (rc) {
2345 kfree(wdata);
2346 break;
2347 }
2348
2349 save_len = cur_len;
2350 for (i = 0; i < nr_pages; i++) {
2351 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2352 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2353 0, copied);
2354 cur_len -= copied;
2355 iov_iter_advance(&it, copied);
2356 }
2357 cur_len = save_len - cur_len;
2358
2359 wdata->sync_mode = WB_SYNC_ALL;
2360 wdata->nr_pages = nr_pages;
2361 wdata->offset = (__u64)offset;
2362 wdata->cfile = cifsFileInfo_get(open_file);
2363 wdata->pid = pid;
2364 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002365 wdata->pagesz = PAGE_SIZE;
2366 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002367 rc = cifs_uncached_retry_writev(wdata);
2368 if (rc) {
2369 kref_put(&wdata->refcount, cifs_writedata_release);
2370 break;
2371 }
2372
2373 list_add_tail(&wdata->list, &wdata_list);
2374 offset += cur_len;
2375 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002376 } while (len > 0);
2377
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002378 /*
2379 * If at least one write was successfully sent, then discard any rc
 2380 * value from the later writes. If the later writes succeed, then
 2381 * we'll end up returning whatever was written. If one fails, then
2382 * we'll get a new rc value from that.
2383 */
2384 if (!list_empty(&wdata_list))
2385 rc = 0;
2386
2387 /*
2388 * Wait for and collect replies for any successful sends in order of
2389 * increasing offset. Once an error is hit or we get a fatal signal
2390 * while waiting, then return without waiting for any more replies.
2391 */
2392restart_loop:
2393 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2394 if (!rc) {
2395 /* FIXME: freezable too? */
2396 rc = wait_for_completion_killable(&wdata->done);
2397 if (rc)
2398 rc = -EINTR;
2399 else if (wdata->result)
2400 rc = wdata->result;
2401 else
2402 total_written += wdata->bytes;
2403
2404 /* resend call if it's a retryable error */
2405 if (rc == -EAGAIN) {
2406 rc = cifs_uncached_retry_writev(wdata);
2407 goto restart_loop;
2408 }
2409 }
2410 list_del_init(&wdata->list);
2411 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002412 }
2413
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002414 if (total_written > 0)
2415 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002416
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002417 cifs_stats_bytes_written(tcon, total_written);
2418 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002419}
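
/*
 * Design note on cifs_iovec_write() above (a summary sketch): the loop
 * slices the user buffer into wsize-sized cifs_writedata requests and keeps
 * them all in flight at once; replies are then collected from wdata_list in
 * order of increasing offset, so total_written only ever counts a
 * contiguous prefix of the requested range.
 */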
2420
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002421ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002422 unsigned long nr_segs, loff_t pos)
2423{
2424 ssize_t written;
2425 struct inode *inode;
2426
2427 inode = iocb->ki_filp->f_path.dentry->d_inode;
2428
2429 /*
 2430 * BB - optimize for the case when signing is disabled. We can drop this
 2431 * extra memory-to-memory copying and use the iovec buffers directly to
 2432 * construct the write request.
2433 */
2434
2435 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2436 if (written > 0) {
2437 CIFS_I(inode)->invalid_mapping = true;
2438 iocb->ki_pos = pos;
2439 }
2440
2441 return written;
2442}
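
/*
 * A usage note (a sketch, not normative): cifs_user_writev() sends the data
 * straight to the server, bypassing the page cache, so any cached pages for
 * this range are now stale; setting invalid_mapping above makes a later
 * cached read revalidate the mapping before trusting it.
 */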
2443
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002444static ssize_t
2445cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2446 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002447{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002448 struct file *file = iocb->ki_filp;
2449 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2450 struct inode *inode = file->f_mapping->host;
2451 struct cifsInodeInfo *cinode = CIFS_I(inode);
2452 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2453 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002454
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002455 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002456
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002457 sb_start_write(inode->i_sb);
2458
2459 /*
 2460 * We need to hold the sem to be sure nobody modifies the lock list
 2461 * with a brlock that prevents writing.
2462 */
2463 down_read(&cinode->lock_sem);
2464 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2465 server->vals->exclusive_lock_type, NULL,
2466 true)) {
2467 mutex_lock(&inode->i_mutex);
2468 rc = __generic_file_aio_write(iocb, iov, nr_segs,
2469 &iocb->ki_pos);
2470 mutex_unlock(&inode->i_mutex);
2471 }
2472
2473 if (rc > 0 || rc == -EIOCBQUEUED) {
2474 ssize_t err;
2475
2476 err = generic_write_sync(file, pos, rc);
2477 if (err < 0 && rc > 0)
2478 rc = err;
2479 }
2480
2481 up_read(&cinode->lock_sem);
2482 sb_end_write(inode->i_sb);
2483 return rc;
2484}
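
/*
 * A note on the ordering in cifs_writev() above (an observation from the
 * code, not a normative rule): lock_sem is taken for read before i_mutex
 * and is held across the write, so no conflicting mandatory brlock can be
 * added while the page-cache write is in flight; generic_write_sync() then
 * provides the usual O_SYNC/O_DSYNC semantics for the bytes written.
 */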
2485
2486ssize_t
2487cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2488 unsigned long nr_segs, loff_t pos)
2489{
2490 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2491 struct cifsInodeInfo *cinode = CIFS_I(inode);
2492 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2493 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2494 iocb->ki_filp->private_data;
2495 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002496
Pavel Shilovsky25078102012-09-19 06:22:45 -07002497#ifdef CONFIG_CIFS_SMB2
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002498 /*
Pavel Shilovsky25078102012-09-19 06:22:45 -07002499 * If we have an oplock for read and want to write data to the file,
 2500 * we need to store it in the page cache and then push it to the server
 2501 * to be sure the next read will get valid data.
2502 */
2503 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
2504 ssize_t written;
2505 int rc;
2506
2507 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
2508 rc = filemap_fdatawrite(inode->i_mapping);
2509 if (rc)
2510 return (ssize_t)rc;
2511
2512 return written;
2513 }
2514#endif
2515
2516 /*
2517 * For non-oplocked files in strict cache mode we need to write the data
 2518 * to the server exactly from pos to pos+len-1 rather than flush all
 2519 * affected pages because that may cause an error with mandatory locks on
 2520 * these pages but not on the region from pos to pos+len-1.
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002521 */
2522
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002523 if (!cinode->clientCanCacheAll)
2524 return cifs_user_writev(iocb, iov, nr_segs, pos);
2525
2526 if (cap_unix(tcon->ses) &&
2527 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2528 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2529 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2530
2531 return cifs_writev(iocb, iov, nr_segs, pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002532}
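
/*
 * Dispatch summary for cifs_strict_writev() above (a sketch): without an
 * exclusive (CacheAll) oplock we cannot trust the local page cache, so the
 * write takes the uncached cifs_user_writev() path. With the oplock, mounts
 * that can use POSIX byte-range locks take plain generic_file_aio_write(),
 * while the rest go through cifs_writev(), which checks the mandatory
 * brlock list first.
 */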
2533
Jeff Layton0471ca32012-05-16 07:13:16 -04002534static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002535cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002536{
2537 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002538
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002539 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2540 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002541 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002542 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002543 INIT_LIST_HEAD(&rdata->list);
2544 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002545 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002546 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002547
Jeff Layton0471ca32012-05-16 07:13:16 -04002548 return rdata;
2549}
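
/*
 * Layout note (observation from the code above): the single kzalloc sizes
 * the trailing pages[] pointer array together with struct cifs_readdata
 * itself; the pages are attached later by cifs_read_allocate_pages(), so a
 * fresh rdata starts with every slot NULL.
 */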
2550
Jeff Layton6993f742012-05-16 07:13:17 -04002551void
2552cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002553{
Jeff Layton6993f742012-05-16 07:13:17 -04002554 struct cifs_readdata *rdata = container_of(refcount,
2555 struct cifs_readdata, refcount);
2556
2557 if (rdata->cfile)
2558 cifsFileInfo_put(rdata->cfile);
2559
Jeff Layton0471ca32012-05-16 07:13:16 -04002560 kfree(rdata);
2561}
2562
Jeff Layton2a1bb132012-05-16 07:13:17 -04002563static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002564cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002565{
2566 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002567 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002568 unsigned int i;
2569
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002570 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002571 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2572 if (!page) {
2573 rc = -ENOMEM;
2574 break;
2575 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002576 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002577 }
2578
 2579 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002580 while (i-- > 0) {
 2581 put_page(rdata->pages[i]);
 2582 rdata->pages[i] = NULL;
 2583 }
 2584 }
2585 return rc;
2586}
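
/*
 * Note on the unwind above: when alloc_page() fails at index i, slots at
 * and beyond i are still NULL (the rdata was kzalloc'ed), so the cleanup
 * loop must release only the i pages that were actually allocated.
 */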
2587
2588static void
2589cifs_uncached_readdata_release(struct kref *refcount)
2590{
Jeff Layton1c892542012-05-16 07:13:17 -04002591 struct cifs_readdata *rdata = container_of(refcount,
2592 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002593 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002594
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002595 for (i = 0; i < rdata->nr_pages; i++) {
2596 put_page(rdata->pages[i]);
2597 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002598 }
2599 cifs_readdata_release(refcount);
2600}
2601
2602static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002603cifs_retry_async_readv(struct cifs_readdata *rdata)
2604{
2605 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002606 struct TCP_Server_Info *server;
2607
2608 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002609
2610 do {
2611 if (rdata->cfile->invalidHandle) {
2612 rc = cifs_reopen_file(rdata->cfile, true);
2613 if (rc != 0)
2614 continue;
2615 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002616 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002617 } while (rc == -EAGAIN);
2618
2619 return rc;
2620}
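
/*
 * Retry note (a sketch): -EAGAIN from async_readv typically means the
 * transport is reconnecting; if the reconnect also invalidated the file
 * handle, cifs_reopen_file() is tried first, and the loop keeps going
 * until the request is handed to the transport or fails hard.
 */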
2621
Jeff Layton1c892542012-05-16 07:13:17 -04002622/**
2623 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2624 * @rdata: the readdata response with list of pages holding data
2625 * @iov: vector in which we should copy the data
2626 * @nr_segs: number of segments in vector
2627 * @offset: offset into file of the first iovec
2628 * @copied: used to return the amount of data copied to the iov
2629 *
2630 * This function copies data from a list of pages in a readdata response into
2631 * an array of iovecs. It will first calculate where the data should go
2632 * based on the info in the readdata and then copy the data into that spot.
2633 */
2634static ssize_t
2635cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2636 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2637{
2638 int rc = 0;
2639 struct iov_iter ii;
2640 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002641 ssize_t remaining = rdata->bytes;
2642 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002643 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002644
2645 /* set up iov_iter and advance to the correct offset */
2646 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2647 iov_iter_advance(&ii, pos);
2648
2649 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002650 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002651 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002652 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002653
2654 /* copy a whole page or whatever's left */
2655 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2656
2657 /* ...but limit it to whatever space is left in the iov */
2658 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2659
2660 /* go while there's data to be copied and no errors */
2661 if (copy && !rc) {
2662 pdata = kmap(page);
2663 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2664 (int)copy);
2665 kunmap(page);
2666 if (!rc) {
2667 *copied += copy;
2668 remaining -= copy;
2669 iov_iter_advance(&ii, copy);
2670 }
2671 }
Jeff Layton1c892542012-05-16 07:13:17 -04002672 }
2673
2674 return rc;
2675}
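
/*
 * A worked example for the offset math above (a sketch): if the original
 * request started at file offset 0 and was split into rdatas at offsets 0
 * and 16384, then for the second rdata pos = 16384 - 0, and the iov_iter
 * is advanced past the bytes that the first rdata's pages will fill.
 */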
2676
2677static void
2678cifs_uncached_readv_complete(struct work_struct *work)
2679{
2680 struct cifs_readdata *rdata = container_of(work,
2681 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002682
2683 complete(&rdata->done);
2684 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2685}
2686
2687static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002688cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2689 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002690{
Jeff Layton8321fec2012-09-19 06:22:32 -07002691 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002692 unsigned int i;
2693 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002694 struct kvec iov;
Jeff Layton1c892542012-05-16 07:13:17 -04002695
Jeff Layton8321fec2012-09-19 06:22:32 -07002696 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002697 for (i = 0; i < nr_pages; i++) {
2698 struct page *page = rdata->pages[i];
2699
Jeff Layton8321fec2012-09-19 06:22:32 -07002700 if (len >= PAGE_SIZE) {
Jeff Layton1c892542012-05-16 07:13:17 -04002701 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002702 iov.iov_base = kmap(page);
2703 iov.iov_len = PAGE_SIZE;
2704 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2705 i, iov.iov_base, iov.iov_len);
2706 len -= PAGE_SIZE;
2707 } else if (len > 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002708 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002709 iov.iov_base = kmap(page);
2710 iov.iov_len = len;
2711 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2712 i, iov.iov_base, iov.iov_len);
2713 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2714 rdata->tailsz = len;
2715 len = 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002716 } else {
2717 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002718 rdata->pages[i] = NULL;
2719 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002720 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002721 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002722 }
Jeff Layton8321fec2012-09-19 06:22:32 -07002723
2724 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2725 kunmap(page);
2726 if (result < 0)
2727 break;
2728
2729 total_read += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002730 }
2731
Jeff Layton8321fec2012-09-19 06:22:32 -07002732 return total_read > 0 ? total_read : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002733}
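
/*
 * A worked example (assuming PAGE_SIZE == 4096): for len = 6000 and two
 * pages, page 0 gets a full 4096-byte kvec; page 1 gets a 1904-byte kvec,
 * the remaining 2192 bytes are zeroed, and tailsz ends up 1904. Any pages
 * past the data are released and dropped from the array.
 */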
2734
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002735static ssize_t
2736cifs_iovec_read(struct file *file, const struct iovec *iov,
2737 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738{
Jeff Layton1c892542012-05-16 07:13:17 -04002739 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002740 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002741 ssize_t total_read = 0;
2742 loff_t offset = *poffset;
2743 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002745 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002747 struct cifs_readdata *rdata, *tmp;
2748 struct list_head rdata_list;
2749 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002750
2751 if (!nr_segs)
2752 return 0;
2753
2754 len = iov_length(iov, nr_segs);
2755 if (!len)
2756 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757
Jeff Layton1c892542012-05-16 07:13:17 -04002758 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002759 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002760 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002761 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002763 if (!tcon->ses->server->ops->async_readv)
2764 return -ENOSYS;
2765
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002766 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2767 pid = open_file->pid;
2768 else
2769 pid = current->tgid;
2770
Steve Frenchad7a2922008-02-07 23:25:02 +00002771 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002772 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002773
Jeff Layton1c892542012-05-16 07:13:17 -04002774 do {
2775 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2776 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002777
Jeff Layton1c892542012-05-16 07:13:17 -04002778 /* allocate a readdata struct */
2779 rdata = cifs_readdata_alloc(npages,
2780 cifs_uncached_readv_complete);
2781 if (!rdata) {
2782 rc = -ENOMEM;
 2783 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002785
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002786 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002787 if (rc)
2788 goto error;
2789
2790 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002791 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002792 rdata->offset = offset;
2793 rdata->bytes = cur_len;
2794 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002795 rdata->pagesz = PAGE_SIZE;
2796 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002797
2798 rc = cifs_retry_async_readv(rdata);
2799error:
2800 if (rc) {
2801 kref_put(&rdata->refcount,
2802 cifs_uncached_readdata_release);
2803 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 }
Jeff Layton1c892542012-05-16 07:13:17 -04002805
2806 list_add_tail(&rdata->list, &rdata_list);
2807 offset += cur_len;
2808 len -= cur_len;
2809 } while (len > 0);
2810
 2811 /* if at least one read request was sent successfully, reset rc */
2812 if (!list_empty(&rdata_list))
2813 rc = 0;
2814
2815 /* the loop below should proceed in the order of increasing offsets */
2816restart_loop:
2817 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2818 if (!rc) {
2819 ssize_t copied;
2820
2821 /* FIXME: freezable sleep too? */
2822 rc = wait_for_completion_killable(&rdata->done);
2823 if (rc)
2824 rc = -EINTR;
2825 else if (rdata->result)
2826 rc = rdata->result;
2827 else {
2828 rc = cifs_readdata_to_iov(rdata, iov,
2829 nr_segs, *poffset,
2830 &copied);
2831 total_read += copied;
2832 }
2833
2834 /* resend call if it's a retryable error */
2835 if (rc == -EAGAIN) {
2836 rc = cifs_retry_async_readv(rdata);
2837 goto restart_loop;
2838 }
2839 }
2840 list_del_init(&rdata->list);
2841 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002843
Jeff Layton1c892542012-05-16 07:13:17 -04002844 cifs_stats_bytes_read(tcon, total_read);
2845 *poffset += total_read;
2846
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002847 /* mask nodata case (the server may return -ENODATA for a read at EOF) */
2848 if (rc == -ENODATA)
2849 rc = 0;
2850
Jeff Layton1c892542012-05-16 07:13:17 -04002851 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852}
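
/*
 * Design note on cifs_iovec_read() above (a sketch): reads are issued in
 * rsize-sized slices that are all in flight at once, then collected from
 * rdata_list in order of increasing offset; -ENODATA from a read at EOF is
 * masked so the caller simply sees a short (or zero-byte) read.
 */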
2853
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002854ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002855 unsigned long nr_segs, loff_t pos)
2856{
2857 ssize_t read;
2858
2859 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2860 if (read > 0)
2861 iocb->ki_pos = pos;
2862
2863 return read;
2864}
2865
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002866ssize_t
2867cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2868 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002869{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002870 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2871 struct cifsInodeInfo *cinode = CIFS_I(inode);
2872 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2873 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2874 iocb->ki_filp->private_data;
2875 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2876 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002877
2878 /*
 2879 * In strict cache mode we need to read from the server every time
 2880 * if we don't have a level II oplock, because the server can delay the
 2881 * mtime change, so we can't decide whether to invalidate the inode's
 2882 * page cache. Reading from the page cache can also fail if there are
 2883 * mandatory locks on the pages affected by this read but not on the
 2884 * region from pos to pos+len-1.
2885 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002886 if (!cinode->clientCanCacheRead)
2887 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002888
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002889 if (cap_unix(tcon->ses) &&
2890 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2891 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2892 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2893
2894 /*
 2895 * We need to hold the sem to be sure nobody modifies the lock list
 2896 * with a brlock that prevents reading.
2897 */
2898 down_read(&cinode->lock_sem);
2899 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2900 tcon->ses->server->vals->shared_lock_type,
2901 NULL, true))
2902 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2903 up_read(&cinode->lock_sem);
2904 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002905}
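
/*
 * Dispatch summary for cifs_strict_readv() above (a sketch): without a
 * read (level II) oplock the read bypasses the cache via cifs_user_readv().
 * POSIX-lock-capable mounts can use the generic cached path directly; the
 * rest may use it only after checking under lock_sem for a conflicting
 * mandatory brlock, using the shared lock type since a read does not
 * conflict with existing read locks.
 */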
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002907static ssize_t
2908cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909{
2910 int rc = -EACCES;
2911 unsigned int bytes_read = 0;
2912 unsigned int total_read;
2913 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002914 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002916 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002917 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002918 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002919 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002921 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002922 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002923 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002925 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002926 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002928 /* FIXME: set up handlers for larger reads and/or convert to async */
2929 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2930
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302932 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002933 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302934 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002936 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002937 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002938 server = tcon->ses->server;
2939
2940 if (!server->ops->sync_read) {
2941 free_xid(xid);
2942 return -ENOSYS;
2943 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002945 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2946 pid = open_file->pid;
2947 else
2948 pid = current->tgid;
2949
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002951 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002953 for (total_read = 0, cur_offset = read_data; read_size > total_read;
2954 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002955 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002956 /*
 2957 * For Windows ME and 9x we do not want to request more than it
 2958 * negotiated, since it will then refuse the read.
2959 */
2960 if ((tcon->ses) && !(tcon->ses->capabilities &
2961 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002962 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002963 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002964 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 rc = -EAGAIN;
2966 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002967 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002968 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 if (rc != 0)
2970 break;
2971 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002972 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002973 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002974 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002975 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002976 rc = server->ops->sync_read(xid, open_file, &io_parms,
2977 &bytes_read, &cur_offset,
2978 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 }
2980 if (rc || (bytes_read == 0)) {
2981 if (total_read) {
2982 break;
2983 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002984 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 return rc;
2986 }
2987 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002988 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002989 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 }
2991 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002992 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 return total_read;
2994}
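
/*
 * A note on the loop above (observation from the code): the read proceeds
 * in rsize-sized chunks; -EAGAIN triggers a reopen of an invalidated handle
 * and a retry, and once any bytes have been read a later error is swallowed
 * so the partial total is returned instead.
 */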
2995
Jeff Laytonca83ce32011-04-12 09:13:44 -04002996/*
2997 * If the page is mmap'ed into a process' page tables, then we need to make
2998 * sure that it doesn't change while being written back.
2999 */
3000static int
3001cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3002{
3003 struct page *page = vmf->page;
3004
3005 lock_page(page);
3006 return VM_FAULT_LOCKED;
3007}
3008
3009static struct vm_operations_struct cifs_file_vm_ops = {
3010 .fault = filemap_fault,
3011 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003012 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003013};
3014
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003015int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3016{
3017 int rc, xid;
3018 struct inode *inode = file->f_path.dentry->d_inode;
3019
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003020 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003021
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003022 if (!CIFS_I(inode)->clientCanCacheRead) {
3023 rc = cifs_invalidate_mapping(inode);
 3024 if (rc) {
 3025 free_xid(xid);
 3026 return rc;
 3027 }
3026 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003027
3028 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003029 if (rc == 0)
3030 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003031 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003032 return rc;
3033}
3034
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3036{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 int rc, xid;
3038
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003039 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003040 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00003042 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003043 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 return rc;
3045 }
3046 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003047 if (rc == 0)
3048 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003049 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050 return rc;
3051}
3052
Jeff Layton0471ca32012-05-16 07:13:16 -04003053static void
3054cifs_readv_complete(struct work_struct *work)
3055{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003056 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003057 struct cifs_readdata *rdata = container_of(work,
3058 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003059
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003060 for (i = 0; i < rdata->nr_pages; i++) {
3061 struct page *page = rdata->pages[i];
3062
Jeff Layton0471ca32012-05-16 07:13:16 -04003063 lru_cache_add_file(page);
3064
3065 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003066 flush_dcache_page(page);
3067 SetPageUptodate(page);
3068 }
3069
3070 unlock_page(page);
3071
3072 if (rdata->result == 0)
3073 cifs_readpage_to_fscache(rdata->mapping->host, page);
3074
3075 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003076 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003077 }
Jeff Layton6993f742012-05-16 07:13:17 -04003078 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003079}
3080
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003081static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003082cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3083 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003084{
Jeff Layton8321fec2012-09-19 06:22:32 -07003085 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003086 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003087 u64 eof;
3088 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003089 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003090 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003091
3092 /* determine the eof that the server (probably) has */
3093 eof = CIFS_I(rdata->mapping->host)->server_eof;
3094 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
3095 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
3096
Jeff Layton8321fec2012-09-19 06:22:32 -07003097 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003098 for (i = 0; i < nr_pages; i++) {
3099 struct page *page = rdata->pages[i];
3100
Jeff Layton8321fec2012-09-19 06:22:32 -07003101 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003102 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003103 iov.iov_base = kmap(page);
3104 iov.iov_len = PAGE_CACHE_SIZE;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003105 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003106 i, page->index, iov.iov_base, iov.iov_len);
3107 len -= PAGE_CACHE_SIZE;
3108 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003109 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003110 iov.iov_base = kmap(page);
3111 iov.iov_len = len;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003112 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003113 i, page->index, iov.iov_base, iov.iov_len);
3114 memset(iov.iov_base + len,
3115 '\0', PAGE_CACHE_SIZE - len);
3116 rdata->tailsz = len;
3117 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003118 } else if (page->index > eof_index) {
3119 /*
3120 * The VFS will not try to do readahead past the
3121 * i_size, but it's possible that we have outstanding
3122 * writes with gaps in the middle and the i_size hasn't
3123 * caught up yet. Populate those with zeroed out pages
3124 * to prevent the VFS from repeatedly attempting to
3125 * fill them until the writes are flushed.
3126 */
3127 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003128 lru_cache_add_file(page);
3129 flush_dcache_page(page);
3130 SetPageUptodate(page);
3131 unlock_page(page);
3132 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003133 rdata->pages[i] = NULL;
3134 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003135 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003136 } else {
3137 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003138 lru_cache_add_file(page);
3139 unlock_page(page);
3140 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003141 rdata->pages[i] = NULL;
3142 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003143 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003144 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003145
3146 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3147 kunmap(page);
3148 if (result < 0)
3149 break;
3150
3151 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003152 }
3153
Jeff Layton8321fec2012-09-19 06:22:32 -07003154 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003155}
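
/*
 * A worked example for the EOF handling above (assuming 4k page cache
 * pages): server_eof = 10000 gives eof_index = (10000 - 1) >> 12 = 2, so a
 * page with index 3 lies wholly past the server's EOF and is zero-filled,
 * marked uptodate, and dropped from the array rather than read from the
 * wire.
 */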
3156
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157static int cifs_readpages(struct file *file, struct address_space *mapping,
3158 struct list_head *page_list, unsigned num_pages)
3159{
Jeff Layton690c5e32011-10-19 15:30:16 -04003160 int rc;
3161 struct list_head tmplist;
3162 struct cifsFileInfo *open_file = file->private_data;
3163 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3164 unsigned int rsize = cifs_sb->rsize;
3165 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166
Jeff Layton690c5e32011-10-19 15:30:16 -04003167 /*
3168 * Give up immediately if rsize is too small to read an entire page.
 3169 * The VFS will fall back to readpage. We should never reach this
 3170 * point, however, since we set ra_pages to 0 when the rsize is smaller
 3171 * than a cache page.
3172 */
3173 if (unlikely(rsize < PAGE_CACHE_SIZE))
3174 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003175
Suresh Jayaraman566982362010-07-05 18:13:25 +05303176 /*
3177 * Reads as many pages as possible from fscache. Returns -ENOBUFS
 3178 * immediately if the cookie is negative.
3179 */
3180 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3181 &num_pages);
3182 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003183 return rc;
Suresh Jayaraman566982362010-07-05 18:13:25 +05303184
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003185 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3186 pid = open_file->pid;
3187 else
3188 pid = current->tgid;
3189
Jeff Layton690c5e32011-10-19 15:30:16 -04003190 rc = 0;
3191 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192
Jeff Layton690c5e32011-10-19 15:30:16 -04003193 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3194 mapping, num_pages);
3195
3196 /*
3197 * Start with the page at end of list and move it to private
3198 * list. Do the same with any following pages until we hit
3199 * the rsize limit, hit an index discontinuity, or run out of
3200 * pages. Issue the async read and then start the loop again
3201 * until the list is empty.
3202 *
3203 * Note that list order is important. The page_list is in
3204 * the order of declining indexes. When we put the pages in
3205 * the rdata->pages, then we want them in increasing order.
3206 */
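	/*
	 * A small example of the reordering (a sketch): a readahead window
	 * for indexes 4-7 arrives on page_list as 7,6,5,4; each pass peels
	 * pages off the tail, so tmplist - and hence rdata->pages[] - ends
	 * up as 4,5,6,7, in increasing file order.
	 */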
3207 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003208 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003209 unsigned int bytes = PAGE_CACHE_SIZE;
3210 unsigned int expected_index;
3211 unsigned int nr_pages = 1;
3212 loff_t offset;
3213 struct page *page, *tpage;
3214 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215
3216 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217
Jeff Layton690c5e32011-10-19 15:30:16 -04003218 /*
3219 * Lock the page and put it in the cache. Since no one else
3220 * should have access to this page, we're safe to simply set
3221 * PG_locked without checking it first.
3222 */
3223 __set_page_locked(page);
3224 rc = add_to_page_cache_locked(page, mapping,
3225 page->index, GFP_KERNEL);
3226
3227 /* give up if we can't stick it in the cache */
3228 if (rc) {
3229 __clear_page_locked(page);
3230 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232
Jeff Layton690c5e32011-10-19 15:30:16 -04003233 /* move first page to the tmplist */
3234 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3235 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236
Jeff Layton690c5e32011-10-19 15:30:16 -04003237 /* now try and add more pages onto the request */
3238 expected_index = page->index + 1;
3239 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3240 /* discontinuity ? */
3241 if (page->index != expected_index)
3242 break;
3243
3244 /* would this page push the read over the rsize? */
3245 if (bytes + PAGE_CACHE_SIZE > rsize)
3246 break;
3247
3248 __set_page_locked(page);
3249 if (add_to_page_cache_locked(page, mapping,
3250 page->index, GFP_KERNEL)) {
3251 __clear_page_locked(page);
3252 break;
3253 }
3254 list_move_tail(&page->lru, &tmplist);
3255 bytes += PAGE_CACHE_SIZE;
3256 expected_index++;
3257 nr_pages++;
3258 }
3259
Jeff Layton0471ca32012-05-16 07:13:16 -04003260 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003261 if (!rdata) {
3262 /* best to give up if we're out of mem */
3263 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3264 list_del(&page->lru);
3265 lru_cache_add_file(page);
3266 unlock_page(page);
3267 page_cache_release(page);
3268 }
3269 rc = -ENOMEM;
3270 break;
3271 }
3272
Jeff Layton6993f742012-05-16 07:13:17 -04003273 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003274 rdata->mapping = mapping;
3275 rdata->offset = offset;
3276 rdata->bytes = bytes;
3277 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003278 rdata->pagesz = PAGE_CACHE_SIZE;
3279 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003280
3281 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3282 list_del(&page->lru);
3283 rdata->pages[rdata->nr_pages++] = page;
3284 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003285
Jeff Layton2a1bb132012-05-16 07:13:17 -04003286 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003287 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003288 for (i = 0; i < rdata->nr_pages; i++) {
3289 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003290 lru_cache_add_file(page);
3291 unlock_page(page);
3292 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293 }
Jeff Layton6993f742012-05-16 07:13:17 -04003294 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 break;
3296 }
Jeff Layton6993f742012-05-16 07:13:17 -04003297
3298 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299 }
3300
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301 return rc;
3302}
3303
3304static int cifs_readpage_worker(struct file *file, struct page *page,
3305 loff_t *poffset)
3306{
3307 char *read_data;
3308 int rc;
3309
Suresh Jayaraman566982362010-07-05 18:13:25 +05303310 /* Is the page cached? */
3311 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
3312 if (rc == 0)
3313 goto read_complete;
3314
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 page_cache_get(page);
3316 read_data = kmap(page);
 3317 /* for reads over a certain size we could initiate async read-ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003318
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003320
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321 if (rc < 0)
3322 goto io_error;
3323 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00003324 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003325
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08003326 file->f_path.dentry->d_inode->i_atime =
3327 current_fs_time(file->f_path.dentry->d_inode->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003328
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329 if (PAGE_CACHE_SIZE > rc)
3330 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3331
3332 flush_dcache_page(page);
3333 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303334
3335 /* send this page to the cache */
3336 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
3337
Linus Torvalds1da177e2005-04-16 15:20:36 -07003338 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003339
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003341 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342 page_cache_release(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05303343
3344read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 return rc;
3346}
3347
3348static int cifs_readpage(struct file *file, struct page *page)
3349{
3350 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3351 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003352 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003354 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355
3356 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303357 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003358 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303359 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360 }
3361
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003362 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003363 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364
3365 rc = cifs_readpage_worker(file, page, &offset);
3366
3367 unlock_page(page);
3368
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003369 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 return rc;
3371}
3372
Steve Frencha403a0a2007-07-26 15:54:16 +00003373static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3374{
3375 struct cifsFileInfo *open_file;
3376
Jeff Layton44772882010-10-15 15:34:03 -04003377 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003378 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003379 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003380 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003381 return 1;
3382 }
3383 }
Jeff Layton44772882010-10-15 15:34:03 -04003384 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003385 return 0;
3386}
3387
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388/* We do not want to update the file size from the server for inodes
 3389 open for write, to avoid races with writepage extending the file.
 3390 In the future we could consider allowing refreshing the inode only
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003391 on increases in the file size, but this is tricky to do without
 3392 racing with writebehind page caching in the current Linux kernel
 3393 design */
Steve French4b18f2a2008-04-29 00:06:05 +00003394bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395{
Steve Frencha403a0a2007-07-26 15:54:16 +00003396 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003397 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003398
Steve Frencha403a0a2007-07-26 15:54:16 +00003399 if (is_inode_writable(cifsInode)) {
3400 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003401 struct cifs_sb_info *cifs_sb;
3402
Steve Frenchc32a0b62006-01-12 14:41:28 -08003403 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003404 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003405 /* since there is no page cache to corrupt on directio,
Steve Frenchc32a0b62006-01-12 14:41:28 -08003406 we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003407 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003408 }
3409
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003410 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003411 return true;
Steve French7ba526312007-02-08 18:14:13 +00003412
Steve French4b18f2a2008-04-29 00:06:05 +00003413 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003414 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003415 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416}
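
/*
 * Decision summary for is_size_safe_to_change() above (a sketch): a NULL
 * cifsInode or an inode with no writable opens is always safe; an inode
 * that is open for write is safe only on a direct-I/O mount (no page cache
 * to corrupt) or when the new end of file is strictly larger than i_size.
 */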
3417
Nick Piggind9414772008-09-24 11:32:59 -04003418static int cifs_write_begin(struct file *file, struct address_space *mapping,
3419 loff_t pos, unsigned len, unsigned flags,
3420 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421{
Nick Piggind9414772008-09-24 11:32:59 -04003422 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3423 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003424 loff_t page_start = pos & PAGE_MASK;
3425 loff_t i_size;
3426 struct page *page;
3427 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428
Joe Perchesb6b38f72010-04-21 03:50:45 +00003429 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003430
Nick Piggin54566b22009-01-04 12:00:53 -08003431 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003432 if (!page) {
3433 rc = -ENOMEM;
3434 goto out;
3435 }
Nick Piggind9414772008-09-24 11:32:59 -04003436
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003437 if (PageUptodate(page))
3438 goto out;
Steve French8a236262007-03-06 00:31:00 +00003439
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003440 /*
3441 * If we write a full page it will be up to date, no need to read from
3442 * the server. If the write is short, we'll end up doing a sync write
3443 * instead.
3444 */
3445 if (len == PAGE_CACHE_SIZE)
3446 goto out;
3447
3448 /*
3449 * optimize away the read when we have an oplock, and we're not
3450 * expecting to use any of the data we'd be reading in. That
3451 * is, when the page lies beyond the EOF, or straddles the EOF
3452 * and the write will cover all of the existing data.
3453 */
3454 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3455 i_size = i_size_read(mapping->host);
3456 if (page_start >= i_size ||
3457 (offset == 0 && (pos + len) >= i_size)) {
3458 zero_user_segments(page, 0, offset,
3459 offset + len,
3460 PAGE_CACHE_SIZE);
3461 /*
3462 * PageChecked means that the parts of the page
3463 * to which we're not writing are considered up
3464 * to date. Once the data is copied to the
3465 * page, it can be set uptodate.
3466 */
3467 SetPageChecked(page);
3468 goto out;
3469 }
3470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471
Nick Piggind9414772008-09-24 11:32:59 -04003472 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003473 /*
3474 * might as well read a page, it is fast enough. If we get
3475 * an error, we don't need to return it. cifs_write_end will
3476 * do a sync write instead since PG_uptodate isn't set.
3477 */
3478 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003479 } else {
3480 /* we could try using another file handle if there is one -
3481 but how would we lock it to prevent close of that handle
 3482 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04003483 this will be written out by write_end, so it is fine */
Steve French8a236262007-03-06 00:31:00 +00003484 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003485out:
3486 *pagep = page;
3487 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488}
3489
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303490static int cifs_release_page(struct page *page, gfp_t gfp)
3491{
3492 if (PagePrivate(page))
3493 return 0;
3494
3495 return cifs_fscache_release_page(page, gfp);
3496}
3497
3498static void cifs_invalidate_page(struct page *page, unsigned long offset)
3499{
3500 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3501
3502 if (offset == 0)
3503 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3504}
3505
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003506static int cifs_launder_page(struct page *page)
3507{
3508 int rc = 0;
3509 loff_t range_start = page_offset(page);
3510 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3511 struct writeback_control wbc = {
3512 .sync_mode = WB_SYNC_ALL,
3513 .nr_to_write = 0,
3514 .range_start = range_start,
3515 .range_end = range_end,
3516 };
3517
3518 cFYI(1, "Launder page: %p", page);
3519
3520 if (clear_page_dirty_for_io(page))
3521 rc = cifs_writepage_locked(page, &wbc);
3522
3523 cifs_fscache_invalidate_page(page, page->mapping->host);
3524 return rc;
3525}
3526
Tejun Heo9b646972010-07-20 22:09:02 +02003527void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003528{
3529 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3530 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003531 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003532 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003533 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003534 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003535
3536 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003537 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003538 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003539 else
Al Viro8737c932009-12-24 06:47:55 -05003540 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003541 rc = filemap_fdatawrite(inode->i_mapping);
3542 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003543 rc = filemap_fdatawait(inode->i_mapping);
3544 mapping_set_error(inode->i_mapping, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003545 invalidate_remote_inode(inode);
3546 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00003547 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003548 }
3549
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003550 rc = cifs_push_locks(cfile);
3551 if (rc)
3552 cERROR(1, "Push locks rc = %d", rc);
3553
Jeff Layton3bc303c2009-09-21 06:47:50 -04003554 /*
 3555 * Releasing a stale oplock after a recent reconnect of the SMB session,
 3556 * using a now-incorrect file handle, is not a data integrity issue, but
 3557 * do not bother sending an oplock release if the session to the server
 3558 * is still disconnected, since the server has already released the oplock.
3559 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003560 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003561 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3562 cinode);
Joe Perchesb6b38f72010-04-21 03:50:45 +00003563 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003564 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003565}
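
/*
 * Sequence summary for cifs_oplock_break() above (a sketch): break any
 * local lease, flush dirty pages, and - if read caching was lost - wait
 * for writeback and invalidate the cached data; then push cached byte-range
 * locks to the server and, unless the break was cancelled by a reconnect,
 * acknowledge the oplock break back to the server.
 */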
3566
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003567const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568 .readpage = cifs_readpage,
3569 .readpages = cifs_readpages,
3570 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003571 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003572 .write_begin = cifs_write_begin,
3573 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003574 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303575 .releasepage = cifs_release_page,
3576 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003577 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003579
3580/*
3581 * cifs_readpages requires the server to support a buffer large enough to
3582 * contain the header plus one complete page of data. Otherwise, we need
3583 * to leave cifs_readpages out of the address space operations.
3584 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003585const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003586 .readpage = cifs_readpage,
3587 .writepage = cifs_writepage,
3588 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003589 .write_begin = cifs_write_begin,
3590 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003591 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303592 .releasepage = cifs_release_page,
3593 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003594 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003595};