// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

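/*
 * Fill @naddr with the transport netid and the universal address string
 * ("addr.port-hi.port-lo") of the server backing @filep, as seen by the
 * client's RPC transport. The port is assumed to be the standard NFS
 * port 2049. Used to describe the destination server in a COPY_NOTIFY.
 */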
static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
	unsigned short port = 2049;

	rcu_read_lock();
	naddr->netid_len = scnprintf(naddr->netid,
				     sizeof(naddr->netid), "%s",
				     rpc_peeraddr2str(clp->cl_rpcclient,
						      RPC_DISPLAY_NETID));
	naddr->addr_len = scnprintf(naddr->addr,
				    sizeof(naddr->addr),
				    "%s.%u.%u",
				    rpc_peeraddr2str(clp->cl_rpcclient,
						     RPC_DISPLAY_ADDR),
				    port >> 8, port & 255);
	rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	u32 bitmask[NFS_BITMASK_SZ];
	struct nfs42_falloc_args args = {
		.falloc_fh = NFS_FH(inode),
		.falloc_offset = offset,
		.falloc_length = len,
		.falloc_bitmask = bitmask,
	};
	struct nfs42_falloc_res res = {
		.falloc_server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
			lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask, inode,
			 NFS_INO_INVALID_BLOCKS);

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode_force_wcc(inode,
							    res.falloc_fattr);

	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
		trace_nfs4_fallocate(inode, &args, status);
	else
		trace_nfs4_deallocate(inode, &args, status);
	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = inode;
	exception.state = lock->open_context->state;

	err = nfs_sync_inode(inode);
	if (err)
		goto out;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	nfs_put_lock_context(lock);
	return err;
}

int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;

	inode_unlock(inode);
	return err;
}

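/*
 * Wait for an asynchronous COPY to complete. If the server has already
 * delivered the CB_OFFLOAD callback, the matching stateid is found on the
 * client's pending_cb_stateids list; otherwise a nfs4_copy_state is queued
 * on the destination (and, for inter-server copies, the source) server's
 * ss_copies list and we sleep on its completion. A signal or a
 * server-reported failure cancels the offload via OFFLOAD_CANCEL.
 */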
static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *dst_server,
			     struct nfs_server *src_server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid,
			     bool *restart)
{
	struct nfs4_copy_state *copy, *tmp_copy;
	int status = NFS4_OK;
	bool found_pending = false;
	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
	struct nfs_open_context *src_ctx = nfs_file_open_context(src);

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	spin_lock(&dst_server->nfs_client->cl_lock);
	list_for_each_entry(tmp_copy,
			    &dst_server->nfs_client->pending_cb_stateids,
			    copies) {
		if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
			   NFS4_STATEID_SIZE))
			continue;
		found_pending = true;
		list_del(&tmp_copy->copies);
		break;
	}
	if (found_pending) {
		spin_unlock(&dst_server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_dst_state = dst_ctx->state;
	copy->parent_src_state = src_ctx->state;

	list_add_tail(&copy->copies, &dst_server->ss_copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);

	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_add_tail(&copy->src_copies, &src_server->ss_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}

	status = wait_for_completion_interruptible(&copy->completion);
	spin_lock(&dst_server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);
	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_del_init(&copy->src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
		status = -EAGAIN;
		*restart = true;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

out_free:
	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	if (!nfs42_files_from_same_server(src, dst))
		nfs42_do_offload_cancel_async(src, src_stateid);
	goto out_free;
}

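/*
 * COMMIT the copied range on the destination and compare the returned
 * verifier with the write verifier from the COPY reply; on a mismatch
 * return -EAGAIN so the caller can redo the copy.
 */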
static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

/**
 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
 * @inode: pointer to destination inode
 * @pos: destination offset
 * @len: copy length
 *
 * Punch a hole in the inode page cache, so that the NFS client will
 * know to retrieve new data.
 * Update the file size if necessary, and then mark the inode as having
 * invalid cached values for change attribute, ctime, mtime and space used.
 */
static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
{
	loff_t newsize = pos + len;
	loff_t end = newsize - 1;

	WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
				pos >> PAGE_SHIFT, end >> PAGE_SHIFT));

	spin_lock(&inode->i_lock);
	if (newsize > i_size_read(inode))
		i_size_write(inode, newsize);
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				      NFS_INO_INVALID_CTIME |
				      NFS_INO_INVALID_MTIME |
				      NFS_INO_INVALID_BLOCKS);
	spin_unlock(&inode->i_lock);
}

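/*
 * Issue a single COPY call. For an inter-server copy, @nss and
 * @cnr_stateid carry the source server address and the stateid returned
 * by COPY_NOTIFY; otherwise the source stateid is derived from the open
 * and lock state. Handles both synchronous and asynchronous replies,
 * committing the data when the server did not return NFS_FILE_SYNC.
 */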
static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res,
				struct nl4_server *nss,
				nfs4_stateid *cnr_stateid,
				bool *restart)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct inode *src_inode = file_inode(src);
	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
	struct nfs_server *src_server = NFS_SERVER(src_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	size_t count = args->count;
	ssize_t status;

	if (nss) {
		args->cp_src = nss;
		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
	} else {
		status = nfs4_set_rw_stateid(&args->src_stateid,
				src_lock->open_context, src_lock, FMODE_READ);
		if (status) {
			if (status == -EAGAIN)
				status = -NFS4ERR_BAD_STATEID;
			return status;
		}
	}
	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
		&src_lock->open_context->state->flags);
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
				&args->seq_args, &res->seq_res, 0);
	trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
	    nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, dst_server, src_server, src,
				dst, &args->src_stateid, restart);
		if (status)
			goto out;
	}

	if ((!res->synchronous || !args->sync) &&
	    res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			goto out;
	}

	nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
	nfs_invalidate_atime(src_inode);
	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

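/*
 * Retry loop around _nfs42_proc_copy(): falls back to a synchronous copy
 * on NFS4ERR_OFFLOAD_NO_REQS, and for inter-server copies turns ESTALE,
 * NFS4ERR_OFFLOAD_DENIED and ENOTSUPP into EOPNOTSUPP (after cancelling
 * the offload) so callers can fall back to a non-offloaded copy.
 */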
ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst, size_t count,
			struct nl4_server *nss,
			nfs4_stateid *cnr_stateid, bool sync)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh = NFS_FH(file_inode(src)),
		.src_pos = pos_src,
		.dst_fh = NFS_FH(file_inode(dst)),
		.dst_pos = pos_dst,
		.count = count,
		.sync = sync,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode = file_inode(src),
		.stateid = &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode = file_inode(dst),
		.stateid = &args.dst_stateid,
	};
	ssize_t err, err2;
	bool restart = false;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				dst, dst_lock,
				&args, &res,
				nss, cnr_stateid, &restart);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if (err == -ENOTSUPP &&
		    nfs42_files_from_same_server(src, dst)) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			if (!restart) {
				dst_exception.retry = 1;
				continue;
			}
			break;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
			args.sync = true;
			dst_exception.retry = 1;
			continue;
		} else if ((err == -ESTALE ||
			    err == -NFS4ERR_OFFLOAD_DENIED ||
			    err == -ENOTSUPP) &&
			   !nfs42_files_from_same_server(src, dst)) {
			nfs42_do_offload_cancel_async(src, &args.src_stateid);
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

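/*
 * OFFLOAD_CANCEL support: an asynchronous RPC sent to the destination
 * server to stop a previously started copy offload identified by its
 * stateid.
 */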
struct nfs42_offloadcancel_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    &data->args.osa_seq_args,
			    &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	trace_nfs4_offload_cancel(&data->args, task->tk_status);
	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
	    nfs4_async_handle_error(task, data->seq_server, NULL,
				    NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_cancel_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_free_offloadcancel_data,
};

static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offloadcancel_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

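/*
 * COPY_NOTIFY: ask the source server to allow a copy to the destination
 * server described by @args->cna_dst, and return in @res a stateid that
 * the destination will present in the subsequent inter-server COPY.
 */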
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
				   struct nfs42_copy_notify_args *args,
				   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	int status;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;

	ctx = get_nfs_open_context(nfs_file_open_context(src));
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx))
		return PTR_ERR(l_ctx);

	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
				     FMODE_READ);
	nfs_put_lock_context(l_ctx);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs4_call_sync(src_server->client, src_server, &msg,
				&args->cna_seq_args, &res->cnr_seq_res, 0);
	trace_nfs4_copy_notify(file_inode(src), args, res, status);
	if (status == -ENOTSUPP)
		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

	put_nfs_open_context(nfs_file_open_context(src));
	return status;
}

int nfs42_proc_copy_notify(struct file *src, struct file *dst,
			   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct nfs42_copy_notify_args *args;
	struct nfs4_exception exception = {
		.inode = file_inode(src),
	};
	int status;

	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
		return -EOPNOTSUPP;

	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
	if (args == NULL)
		return -ENOMEM;

	args->cna_src_fh = NFS_FH(file_inode(src)),
	args->cna_dst.nl4_type = NL4_NETADDR;
	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
	exception.stateid = &args->cna_src_stateid;

	do {
		status = _nfs42_proc_copy_notify(src, dst, args, res);
		if (status == -ENOTSUPP) {
			status = -EOPNOTSUPP;
			goto out;
		}
		status = nfs4_handle_exception(src_server, status, &exception);
	} while (exception.retry);

out:
	kfree(args);
	return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh = NFS_FH(inode),
		.sa_offset = offset,
		.sa_what = (whence == SEEK_HOLE) ?
			NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
			lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
			offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_llseek(inode, &args, &res, status);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	if (whence == SEEK_DATA && res.sr_eof)
		return -NFS4ERR_NXIO;
	else
		return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

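/*
 * LAYOUTSTATS: asynchronous RPC carrying per-device I/O statistics for a
 * pNFS layout back to the metadata server. The current layout stateid is
 * copied into the arguments at ->rpc_call_prepare time.
 */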
static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}

	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
			       task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

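/*
 * Send up to NFS42_LAYOUTERROR_MAX layout error reports to the server in
 * a single asynchronous LAYOUTERROR call.
 */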
int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
		const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	__u32 dst_bitmask[NFS_BITMASK_SZ];
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = dst_bitmask,
	};
	struct nfs42_clone_res res = {
		.server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
			src_lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}
	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
			dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	nfs4_bitmask_set(dst_bitmask, server->cache_consistency_bitmask,
			 dst_inode, NFS_INO_INVALID_BLOCKS);

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_clone(src_inode, dst_inode, &args, status);
	if (status == 0) {
		nfs42_copy_dest_done(dst_inode, dst_offset, count);
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
	}

	kfree(res.dst_fattr);
	return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

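/*
 * NFSv4.2 extended attribute support: GETXATTR, SETXATTR, LISTXATTRS and
 * REMOVEXATTR. Attribute data is passed in page arrays sized up to
 * XATTR_SIZE_MAX (NFS4XATTR_MAXPAGES pages).
 */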
#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_removexattrargs args = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_removexattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret;
	unsigned long timestamp = jiffies;

	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			     &res.seq_res, 1);
	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
				const void *buf, size_t buflen, int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4XATTR_MAXPAGES];
	struct nfs42_setxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
		.xattr_flags = flags,
	};
	struct nfs42_setxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;
	unsigned long timestamp = jiffies;

	if (buflen > server->sxasize)
		return -ERANGE;

	if (buflen > 0) {
		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
		if (np < 0)
			return np;
	} else
		np = 0;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 1);

	for (; np > 0; np--)
		put_page(pages[np - 1]);

	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
				void *buf, size_t buflen, struct page **pages,
				size_t plen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_getxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_getxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	ssize_t ret;

	arg.xattr_len = plen;
	arg.xattr_pages = pages;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	if (ret < 0)
		return ret;

	/*
	 * Normally, the caching is done one layer up, but for successful
	 * RPCs, always cache the result here, even if the caller was
	 * just querying the length, or if the reply was too big for
	 * the caller. This avoids a second RPC in the case of the
	 * common query-alloc-retrieve cycle for xattrs.
	 *
	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
	 */

	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

	if (buflen) {
		if (res.xattr_len > buflen)
			return -ERANGE;
		_copy_from_pages(buf, pages, 0, res.xattr_len);
	}

	return res.xattr_len;
}

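/*
 * LISTXATTRS: the reply buffer is sized from the caller's @buflen via
 * nfs42_listxattr_xdrsize(), capped at the server's advertised maximum,
 * and backed by a freshly allocated page array plus one scratch page for
 * XDR decoding.
 */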
static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
				size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page **pages;
	struct nfs42_listxattrsargs arg = {
		.fh = NFS_FH(inode),
		.cookie = *cookiep,
	};
	struct nfs42_listxattrsres res = {
		.eof = false,
		.xattr_buf = buf,
		.xattr_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	u32 xdrlen;
	int ret, np, i;

	ret = -ENOMEM;
	res.scratch = alloc_page(GFP_KERNEL);
	if (!res.scratch)
		goto out;

	xdrlen = nfs42_listxattr_xdrsize(buflen);
	if (xdrlen > server->lxasize)
		xdrlen = server->lxasize;
	np = xdrlen / PAGE_SIZE + 1;

	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free_scratch;
	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free_pages;
	}

	arg.xattr_pages = pages;
	arg.count = xdrlen;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);

	if (ret >= 0) {
		ret = res.copied;
		*cookiep = res.cookie;
		*eofp = res.eof;
	}

out_free_pages:
	while (--np >= 0) {
		if (pages[np])
			__free_page(pages[np]);
	}
	kfree(pages);
out_free_scratch:
	__free_page(res.scratch);
out:
	return ret;
}

ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
			    void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t err, np, i;
	struct page **pages;

	np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
	pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			np = i + 1;
			err = -ENOMEM;
			goto out;
		}
	}

	/*
	 * The GETXATTR op has no length field in the call, and the
	 * xattr data is at the end of the reply.
	 *
	 * There is no downside in using the page-aligned length. It will
	 * allow receiving and caching xattrs that are too large for the
	 * caller but still fit in the page-rounded value.
	 */
	do {
		err = _nfs42_proc_getxattr(inode, name, buf, buflen,
			pages, np * PAGE_SIZE);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
				&exception);
	} while (exception.retry);

out:
	while (--np >= 0)
		__free_page(pages[np]);
	kfree(pages);

	return err;
}

int nfs42_proc_setxattr(struct inode *inode, const char *name,
			const void *buf, size_t buflen, int flags)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
				&exception);
	} while (exception.retry);

	return err;
}

ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
			      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs4_exception exception = { };
	ssize_t err;

	do {
		err = _nfs42_proc_listxattrs(inode, buf, buflen,
			cookiep, eofp);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
				&exception);
	} while (exception.retry);

	return err;
}

int nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_removexattr(inode, name);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
				&exception);
	} while (exception.retry);

	return err;
}