/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"

#define NFSDDBG_FACILITY        NFSDDBG_PROC

#define NFSPROC4_CB_NULL        0
#define NFSPROC4_CB_COMPOUND    1

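/*
 * NFSPROC4_CB_NULL and NFSPROC4_CB_COMPOUND are the on-the-wire RPC
 * procedure numbers of the NFSv4 callback program; the NFSPROC4_CLNT_*
 * values below are only indices into our local nfs4_cb_procedures[]
 * table.  CB_RECALL and CB_SEQUENCE are individual operations carried
 * inside a CB_COMPOUND procedure.
 */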
/* Index of predefined Linux callback client operations */

enum {
        NFSPROC4_CLNT_CB_NULL = 0,
        NFSPROC4_CLNT_CB_RECALL,
        NFSPROC4_CLNT_CB_SEQUENCE,
};

enum nfs_cb_opnum4 {
        OP_CB_RECALL = 4,
        OP_CB_SEQUENCE = 11,
};

#define NFS4_MAXTAGLEN          20

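/*
 * The *_sz constants below are sizes in 32-bit XDR words (hence the
 * ">> 2" on byte lengths); they feed the p_arglen/p_replen fields of
 * nfs4_cb_procedures[], which size the RPC send and reply buffers.
 */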
#define NFS4_enc_cb_null_sz     0
#define NFS4_dec_cb_null_sz     0
#define cb_compound_enc_hdr_sz  4
#define cb_compound_dec_hdr_sz  (3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz            (NFS4_MAX_SESSIONID_LEN >> 2)
#define cb_sequence_enc_sz      (sessionid_sz + 4 +                     \
                                 1 /* no referring calls list yet */)
#define cb_sequence_dec_sz      (op_dec_sz + sessionid_sz + 4)

#define op_enc_sz               1
#define op_dec_sz               2
#define enc_nfs4_fh_sz          (1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz          (NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz   (cb_compound_enc_hdr_sz +               \
                                 cb_sequence_enc_sz +                   \
                                 1 + enc_stateid_sz +                   \
                                 enc_nfs4_fh_sz)

#define NFS4_dec_cb_recall_sz   (cb_compound_dec_hdr_sz +               \
                                 cb_sequence_dec_sz +                   \
                                 op_dec_sz)

/*
 * Generic encode routines from fs/nfs/nfs4xdr.c
 */
static inline __be32 *
xdr_writemem(__be32 *p, const void *ptr, int nbytes)
{
        int tmp = XDR_QUADLEN(nbytes);

        if (!tmp)
                return p;
        p[tmp-1] = 0;
        memcpy(p, ptr, nbytes);
        return p + tmp;
}

#define WRITE32(n)              *p++ = htonl(n)
#define WRITEMEM(ptr, nbytes)   do {                                    \
        p = xdr_writemem(p, ptr, nbytes);                               \
} while (0)
#define RESERVE_SPACE(nbytes)   do {                                    \
        p = xdr_reserve_space(xdr, nbytes);                             \
        if (!p)                                                         \
                dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", \
                        (int) (nbytes), __func__);                      \
        BUG_ON(!p);                                                     \
} while (0)

/*
 * Generic decode routines from fs/nfs/nfs4xdr.c
 */
#define DECODE_TAIL                                                     \
        status = 0;                                                     \
out:                                                                    \
        return status;                                                  \
xdr_error:                                                              \
        dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__);      \
        status = -EIO;                                                  \
        goto out

#define READ32(x)       (x) = ntohl(*p++)
#define READ64(x)       do {                                            \
        (x) = (u64)ntohl(*p++) << 32;                                   \
        (x) |= ntohl(*p++);                                             \
} while (0)
#define READTIME(x)     do {                                            \
        p++;                                                            \
        (x.tv_sec) = ntohl(*p++);                                       \
        (x.tv_nsec) = ntohl(*p++);                                      \
} while (0)
#define READ_BUF(nbytes)        do {                                    \
        p = xdr_inline_decode(xdr, nbytes);                             \
        if (!p) {                                                       \
                dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
                        __func__, __LINE__);                            \
                return -EIO;                                            \
        }                                                               \
} while (0)

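/*
 * State shared by the encode and decode routines for one CB_COMPOUND
 * call: the arguments that go into the compound header (ident,
 * minorversion, and an nops count that is backpatched through nops_p
 * once all operations have been encoded) and the status pulled out of
 * the reply header.
 */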
struct nfs4_cb_compound_hdr {
        /* args */
        u32             ident;  /* minorversion 0 only */
        u32             nops;
        __be32          *nops_p;
        u32             minorversion;
        /* res */
        int             status;
};

static struct {
        int stat;
        int errno;
} nfs_cb_errtbl[] = {
        { NFS4_OK,              0               },
        { NFS4ERR_PERM,         EPERM           },
        { NFS4ERR_NOENT,        ENOENT          },
        { NFS4ERR_IO,           EIO             },
        { NFS4ERR_NXIO,         ENXIO           },
        { NFS4ERR_ACCESS,       EACCES          },
        { NFS4ERR_EXIST,        EEXIST          },
        { NFS4ERR_XDEV,         EXDEV           },
        { NFS4ERR_NOTDIR,       ENOTDIR         },
        { NFS4ERR_ISDIR,        EISDIR          },
        { NFS4ERR_INVAL,        EINVAL          },
        { NFS4ERR_FBIG,         EFBIG           },
        { NFS4ERR_NOSPC,        ENOSPC          },
        { NFS4ERR_ROFS,         EROFS           },
        { NFS4ERR_MLINK,        EMLINK          },
        { NFS4ERR_NAMETOOLONG,  ENAMETOOLONG    },
        { NFS4ERR_NOTEMPTY,     ENOTEMPTY       },
        { NFS4ERR_DQUOT,        EDQUOT          },
        { NFS4ERR_STALE,        ESTALE          },
        { NFS4ERR_BADHANDLE,    EBADHANDLE      },
        { NFS4ERR_BAD_COOKIE,   EBADCOOKIE      },
        { NFS4ERR_NOTSUPP,      ENOTSUPP        },
        { NFS4ERR_TOOSMALL,     ETOOSMALL       },
        { NFS4ERR_SERVERFAULT,  ESERVERFAULT    },
        { NFS4ERR_BADTYPE,      EBADTYPE        },
        { NFS4ERR_LOCKED,       EAGAIN          },
        { NFS4ERR_RESOURCE,     EREMOTEIO       },
        { NFS4ERR_SYMLINK,      ELOOP           },
        { NFS4ERR_OP_ILLEGAL,   EOPNOTSUPP      },
        { NFS4ERR_DEADLOCK,     EDEADLK         },
        { -1,                   EIO             }
};

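/*
 * Translate an NFS4ERR_* status from a callback reply into a local errno
 * value.  The table above stores positive errnos; decode_cb_op_hdr()
 * negates the result.  Unknown statuses are passed through unchanged.
 */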
static int
nfs_cb_stat_to_errno(int stat)
{
        int i;

        for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
                if (nfs_cb_errtbl[i].stat == stat)
                        return nfs_cb_errtbl[i].errno;
        }
        /*
         * If we cannot translate the error, the recovery routines should
         * handle it.
         * Note: remaining NFSv4 error codes have values > 10000, so should
         * not conflict with native Linux error codes.
         */
        return stat;
}

/*
 * XDR encode
 */

static void
encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
        __be32 *p;

        RESERVE_SPACE(sizeof(stateid_t));
        WRITE32(sid->si_generation);
        WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
}

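/*
 * Encode the CB_COMPOUND header.  The number of operations is not known
 * until the individual ops have been encoded, so hdr->nops_p remembers
 * the location of the nops word and encode_cb_nops() overwrites it at
 * the end.
 */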
static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;

        RESERVE_SPACE(16);
        WRITE32(0);             /* tag length is always 0 */
        WRITE32(hdr->minorversion);
        WRITE32(hdr->ident);
        hdr->nops_p = p;
        WRITE32(hdr->nops);
}

static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
        *hdr->nops_p = htonl(hdr->nops);
}

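/*
 * Encode a CB_RECALL operation: the delegation stateid, a truncate flag
 * (always zero here), and the file handle being recalled.
 */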
static void
encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
                struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;
        int len = dp->dl_fh.fh_size;

        RESERVE_SPACE(4);
        WRITE32(OP_CB_RECALL);
        encode_stateid(xdr, &dp->dl_stateid);
        RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
        WRITE32(0); /* truncate optimization not implemented */
        WRITE32(len);
        WRITEMEM(&dp->dl_fh.fh_base, len);
        hdr->nops++;
}

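/*
 * Encode a CB_SEQUENCE operation for an NFSv4.1 backchannel compound.
 * For minorversion 0 there is no CB_SEQUENCE, so this is a no-op.  We
 * advertise a single slot (slotid and highest slotid both 0) and never
 * ask the client to cache the reply.
 */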
static void
encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
                   struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;
        struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;

        if (hdr->minorversion == 0)
                return;

        RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);

        WRITE32(OP_CB_SEQUENCE);
        WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
        WRITE32(ses->se_cb_seq_nr);
        WRITE32(0);             /* slotid, always 0 */
        WRITE32(0);             /* highest slotid always 0 */
        WRITE32(0);             /* cachethis always 0 */
        WRITE32(0);             /* FIXME: support referring_call_lists */
        hdr->nops++;
}

static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
        struct xdr_stream xdrs, *xdr = &xdrs;

        xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
        RESERVE_SPACE(0);
        return 0;
}

static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
                struct nfsd4_callback *cb)
{
        struct xdr_stream xdr;
        struct nfs4_delegation *args = cb->cb_op;
        struct nfs4_cb_compound_hdr hdr = {
                .ident = cb->cb_clp->cl_cb_ident,
                .minorversion = cb->cb_minorversion,
        };

        xdr_init_encode(&xdr, &req->rq_snd_buf, p);
        encode_cb_compound_hdr(&xdr, &hdr);
        encode_cb_sequence(&xdr, cb, &hdr);
        encode_cb_recall(&xdr, args, &hdr);
        encode_cb_nops(&hdr);
        return 0;
}

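/*
 * Decode the CB_COMPOUND reply header: status, a tag we have no use for
 * (skipped), and the operation count.
 */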
static int
decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;
        u32 taglen;

        READ_BUF(8);
        READ32(hdr->status);
        /* We've got no use for the tag; ignore it: */
        READ32(taglen);
        READ_BUF(taglen + 4);
        p += XDR_QUADLEN(taglen);
        READ32(hdr->nops);
        return 0;
}

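/*
 * Decode a per-operation result header: check that the client replied to
 * the operation we actually sent, and convert a non-OK status into a
 * negative errno.
 */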
static int
decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
        __be32 *p;
        u32 op;
        int32_t nfserr;

        READ_BUF(8);
        READ32(op);
        if (op != expected) {
                dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
                        " operation %d but we issued a request for %d\n",
                        op, expected);
                return -EIO;
        }
        READ32(nfserr);
        if (nfserr != NFS_OK)
                return -nfs_cb_stat_to_errno(nfserr);
        return 0;
}

/*
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int
decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
                   struct rpc_rqst *rqstp)
{
        struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
        struct nfs4_sessionid id;
        int status;
        u32 dummy;
        __be32 *p;

        if (cb->cb_minorversion == 0)
                return 0;

        status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
        if (status)
                return status;

        /*
         * If the server returns different values for sessionID, slotID or
         * sequence number, the server is looney tunes.
         */
        status = -ESERVERFAULT;

        READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
        memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
        p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
        if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
                dprintk("%s Invalid session id\n", __func__);
                goto out;
        }
        READ32(dummy);
        if (dummy != ses->se_cb_seq_nr) {
                dprintk("%s Invalid sequence number\n", __func__);
                goto out;
        }
        READ32(dummy);  /* slotid must be 0 */
        if (dummy != 0) {
                dprintk("%s Invalid slotid\n", __func__);
                goto out;
        }
        /* FIXME: process highest slotid and target highest slotid */
        status = 0;
out:
        return status;
}

static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
        return 0;
}

static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
                struct nfsd4_callback *cb)
{
        struct xdr_stream xdr;
        struct nfs4_cb_compound_hdr hdr;
        int status;

        xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
        status = decode_cb_compound_hdr(&xdr, &hdr);
        if (status)
                goto out;
        if (cb) {
                status = decode_cb_sequence(&xdr, cb, rqstp);
                if (status)
                        goto out;
        }
        status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
        return status;
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)                              \
[NFSPROC4_CLNT_##proc] = {                                              \
        .p_proc    = NFSPROC4_CB_##call,                                \
        .p_encode  = (kxdrproc_t) nfs4_xdr_##argtype,                   \
        .p_decode  = (kxdrproc_t) nfs4_xdr_##restype,                   \
        .p_arglen  = NFS4_##argtype##_sz,                               \
        .p_replen  = NFS4_##restype##_sz,                               \
        .p_statidx = NFSPROC4_CB_##call,                                \
        .p_name    = #proc,                                             \
}

static struct rpc_procinfo nfs4_cb_procedures[] = {
        PROC(CB_NULL,   NULL,     enc_cb_null,   dec_cb_null),
        PROC(CB_RECALL, COMPOUND, enc_cb_recall, dec_cb_recall),
};

/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
static struct rpc_version nfs_cb_version4 = {
        .number                 = 1,
        .nrprocs                = ARRAY_SIZE(nfs4_cb_procedures),
        .procs                  = nfs4_cb_procedures
};

static struct rpc_version *nfs_cb_version[] = {
        &nfs_cb_version4,
};

static struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
        .program                = &cb_program
};

#define NFS4_CALLBACK 0x40000000
static struct rpc_program cb_program = {
        .name                   = "nfs4_cb",
        .number                 = NFS4_CALLBACK,
        .nrvers                 = ARRAY_SIZE(nfs_cb_version),
        .version                = nfs_cb_version,
        .stats                  = &cb_stats,
        .pipe_dir_name          = "/nfsd4_cb",
};

static int max_cb_time(void)
{
        return max(nfsd4_lease/10, (time_t)1) * HZ;
}

/* Reference counting, callback cleanup, etc., all look racy as heck.
 * And why is cl_cb_set an atomic? */

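/*
 * Build the rpc_clnt used for callbacks to this client.  For NFSv4.0 we
 * connect out to the callback address and program number the client
 * advertised; for NFSv4.1 we instead ride the client's existing
 * connection via the back channel (XPRT_TRANSPORT_BC_TCP), using the
 * callback program number it registered for the session.
 */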
int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
        struct rpc_timeout      timeparms = {
                .to_initval     = max_cb_time(),
                .to_retries     = 0,
        };
        struct rpc_create_args args = {
                .net            = &init_net,
                .protocol       = XPRT_TRANSPORT_TCP,
                .address        = (struct sockaddr *) &conn->cb_addr,
                .addrsize       = conn->cb_addrlen,
                .timeout        = &timeparms,
                .program        = &cb_program,
                .prognumber     = conn->cb_prog,
                .version        = 0,
                .authflavor     = clp->cl_flavor,
                .flags          = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
                .client_name    = clp->cl_principal,
        };
        struct rpc_clnt *client;

        if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
                return -EINVAL;
        if (conn->cb_minorversion) {
                args.bc_xprt = conn->cb_xprt;
                args.prognumber = clp->cl_cb_session->se_cb_prog;
                args.protocol = XPRT_TRANSPORT_BC_TCP;
        }
        /* Create RPC client */
        client = rpc_create(&args);
        if (IS_ERR(client)) {
                dprintk("NFSD: couldn't create callback client: %ld\n",
                        PTR_ERR(client));
                return PTR_ERR(client);
        }
        clp->cl_cb_ident = conn->cb_ident;
        clp->cl_cb_client = client;
        return 0;
}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
        dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
                (int)clp->cl_name.len, clp->cl_name.data, reason);
}

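/*
 * Completion handler for the CB_NULL probe: a successful reply marks the
 * callback channel as up (cl_cb_set); a failure just logs a warning.
 */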
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
        struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);

        if (task->tk_status)
                warn_no_callback_path(clp, task->tk_status);
        else
                atomic_set(&clp->cl_cb_set, 1);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
        /* XXX: release method to ensure we set the cb channel down if
         * necessary on early failure? */
        .rpc_call_done = nfsd4_cb_probe_done,
};

static struct rpc_cred *callback_cred;

int set_callback_cred(void)
{
        if (callback_cred)
                return 0;
        callback_cred = rpc_lookup_machine_cred();
        if (!callback_cred)
                return -ENOMEM;
        return 0;
}

static struct workqueue_struct *callback_wq;

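/*
 * Queue a CB_NULL probe on the client's embedded cl_cb_null callback.
 * The actual RPC is sent from the callback workqueue by
 * nfsd4_do_callback_rpc(), which will also rebuild the rpc client first
 * if the callback parameters have changed.
 */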
static void do_probe_callback(struct nfs4_client *clp)
{
        struct nfsd4_callback *cb = &clp->cl_cb_null;

        cb->cb_op = NULL;
        cb->cb_clp = clp;

        cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
        cb->cb_msg.rpc_argp = NULL;
        cb->cb_msg.rpc_resp = NULL;
        cb->cb_msg.rpc_cred = callback_cred;

        cb->cb_ops = &nfsd4_cb_probe_ops;

        queue_work(callback_wq, &cb->cb_work);
}

/*
 * Poke the callback thread to process any updates to the callback
 * parameters, and send a null probe.
 */
void nfsd4_probe_callback(struct nfs4_client *clp)
{
        set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
        do_probe_callback(clp);
}

void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
        BUG_ON(atomic_read(&clp->cl_cb_set));

        spin_lock(&clp->cl_lock);
        memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
        spin_unlock(&clp->cl_lock);
}

/*
 * There's currently a single callback channel slot.
 * If the slot is available, then mark it busy.  Otherwise, set the
 * thread for sleeping on the callback RPC wait queue.
 */
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
                struct rpc_task *task)
{
        u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
        int status = 0;

        dprintk("%s: %u:%u:%u:%u\n", __func__,
                ptr[0], ptr[1], ptr[2], ptr[3]);

        if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
                rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
                dprintk("%s slot is busy\n", __func__);
                status = -EAGAIN;
                goto out;
        }
out:
        dprintk("%s status=%d\n", __func__, status);
        return status;
}

/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
        struct nfsd4_callback *cb = calldata;
        struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
        struct nfs4_client *clp = dp->dl_client;
        u32 minorversion = clp->cl_cb_conn.cb_minorversion;
        int status = 0;

        cb->cb_minorversion = minorversion;
        if (minorversion) {
                status = nfsd41_cb_setup_sequence(clp, task);
                if (status) {
                        if (status != -EAGAIN) {
                                /* terminate rpc task */
                                task->tk_status = status;
                                task->tk_action = NULL;
                        }
                        return;
                }
        }
        rpc_call_start(task);
}

static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
        struct nfsd4_callback *cb = calldata;
        struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
        struct nfs4_client *clp = dp->dl_client;

        dprintk("%s: minorversion=%d\n", __func__,
                clp->cl_cb_conn.cb_minorversion);

        if (clp->cl_cb_conn.cb_minorversion) {
                /* No need for lock, access serialized in nfsd4_cb_prepare */
                ++clp->cl_cb_session->se_cb_seq_nr;
                clear_bit(0, &clp->cl_cb_slot_busy);
                rpc_wake_up_next(&clp->cl_cb_waitq);
                dprintk("%s: freed slot, new seqid=%d\n", __func__,
                        clp->cl_cb_session->se_cb_seq_nr);

                /* We're done looking into the sequence information */
                task->tk_msg.rpc_resp = NULL;
        }
}

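/*
 * Completion handler for CB_RECALL.  A stateid error usually means the
 * client received the recall before the reply granting the delegation,
 * so the call is retried once after a short delay.  Other errors mark
 * the callback path down; if the connection was replaced while the call
 * was in flight, the recall is requeued on the new rpc client.
 */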
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
        struct nfsd4_callback *cb = calldata;
        struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
        struct nfs4_client *clp = dp->dl_client;
        struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

        nfsd4_cb_done(task, calldata);

        if (current_rpc_client == NULL) {
                /* We're shutting down; give up. */
                /* XXX: err, or is it ok just to fall through
                 * and rpc_restart_call? */
                return;
        }

        switch (task->tk_status) {
        case 0:
                return;
        case -EBADHANDLE:
        case -NFS4ERR_BAD_STATEID:
                /* Race: client probably got cb_recall
                 * before open reply granting delegation */
                break;
        default:
                /* Network partition? */
                atomic_set(&clp->cl_cb_set, 0);
                warn_no_callback_path(clp, task->tk_status);
                if (current_rpc_client != task->tk_client) {
                        /* queue a callback on the new connection: */
                        atomic_inc(&dp->dl_count);
                        nfsd4_cb_recall(dp);
                        return;
                }
        }
        if (dp->dl_retries--) {
                rpc_delay(task, 2*HZ);
                task->tk_status = 0;
                rpc_restart_call_prepare(task);
                return;
        } else {
                atomic_set(&clp->cl_cb_set, 0);
                warn_no_callback_path(clp, task->tk_status);
        }
}

static void nfsd4_cb_recall_release(void *calldata)
{
        struct nfsd4_callback *cb = calldata;
        struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);

        nfs4_put_delegation(dp);
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
        .rpc_call_prepare = nfsd4_cb_prepare,
        .rpc_call_done = nfsd4_cb_recall_done,
        .rpc_release = nfsd4_cb_recall_release,
};

int nfsd4_create_callback_queue(void)
{
        callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
        if (!callback_wq)
                return -ENOMEM;
        return 0;
}

void nfsd4_destroy_callback_queue(void)
{
        destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
        set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags);
        /*
         * Note this won't actually result in a null callback;
         * instead, nfsd4_do_callback_rpc() will detect the killed
         * client, destroy the rpc client, and stop:
         */
        do_probe_callback(clp);
        flush_workqueue(callback_wq);
}

void nfsd4_release_cb(struct nfsd4_callback *cb)
{
        if (cb->cb_ops->rpc_release)
                cb->cb_ops->rpc_release(cb);
}

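/*
 * Runs from the callback workqueue whenever cl_cb_flags is set: tear
 * down the old rpc client and, unless the client is being killed,
 * rebuild it from the current cl_cb_conn.  Only this serialized
 * workqueue context may clear the flags.
 */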
void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
        struct nfs4_cb_conn conn;
        struct nfs4_client *clp = cb->cb_clp;
        int err;

        /*
         * This is either an update, or the client dying; in either case,
         * kill the old client:
         */
        if (clp->cl_cb_client) {
                rpc_shutdown_client(clp->cl_cb_client);
                clp->cl_cb_client = NULL;
        }
        if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
                return;
        spin_lock(&clp->cl_lock);
        /*
         * Only serialized callback code is allowed to clear these
         * flags; main nfsd code can only set them:
         */
        BUG_ON(!clp->cl_cb_flags);
        clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
        memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
        spin_unlock(&clp->cl_lock);

        err = setup_callback_client(clp, &conn);
        if (err)
                warn_no_callback_path(clp, err);
}

void nfsd4_do_callback_rpc(struct work_struct *w)
{
        struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
        struct nfs4_client *clp = cb->cb_clp;
        struct rpc_clnt *clnt;

        if (clp->cl_cb_flags)
                nfsd4_process_cb_update(cb);

        clnt = clp->cl_cb_client;
        if (!clnt) {
                /* Callback channel broken, or client killed; give up: */
                nfsd4_release_cb(cb);
                return;
        }
        rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
                        cb->cb_ops, cb);
}

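/*
 * Kick off a CB_RECALL for a delegation: fill in the delegation's
 * embedded nfsd4_callback and hand it to the callback workqueue, which
 * sends the RPC asynchronously via nfsd4_do_callback_rpc().
 */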
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
        struct nfsd4_callback *cb = &dp->dl_recall;

        dp->dl_retries = 1;
        cb->cb_op = dp;
        cb->cb_clp = dp->dl_client;
        cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
        cb->cb_msg.rpc_argp = cb;
        cb->cb_msg.rpc_resp = cb;
        cb->cb_msg.rpc_cred = callback_cred;

        cb->cb_ops = &nfsd4_cb_recall_ops;

        queue_work(callback_wq, &dp->dl_recall.cb_work);
}