/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

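/*
 * The NFSv4 callback service exposes only two procedures to the client:
 * CB_NULL (a ping used to probe the callback path) and CB_COMPOUND (which
 * carries the actual callback operations, e.g. CB_RECALL).
 */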
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1

/* Index of predefined Linux callback client operations */

enum {
	NFSPROC4_CLNT_CB_NULL = 0,
	NFSPROC4_CLNT_CB_RECALL,
	NFSPROC4_CLNT_CB_SEQUENCE,
};

enum nfs_cb_opnum4 {
	OP_CB_RECALL		= 4,
	OP_CB_SEQUENCE		= 11,
};

#define NFS4_MAXTAGLEN		20

#define NFS4_enc_cb_null_sz		0
#define NFS4_dec_cb_null_sz		0
#define cb_compound_enc_hdr_sz		4
#define cb_compound_dec_hdr_sz		(3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz			(NFS4_MAX_SESSIONID_LEN >> 2)
#define cb_sequence_enc_sz		(sessionid_sz + 4 +		\
					1 /* no referring calls list yet */)
#define cb_sequence_dec_sz		(op_dec_sz + sessionid_sz + 4)

#define op_enc_sz			1
#define op_dec_sz			2
#define enc_nfs4_fh_sz			(1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz			(NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz		(cb_compound_enc_hdr_sz +	\
					cb_sequence_enc_sz +		\
					1 + enc_stateid_sz +		\
					enc_nfs4_fh_sz)

#define NFS4_dec_cb_recall_sz		(cb_compound_dec_hdr_sz +	\
					cb_sequence_dec_sz +		\
					op_dec_sz)
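/*
 * All of the *_sz figures above are in 32-bit XDR words, not bytes.  As a
 * worked example, cb_compound_enc_hdr_sz is 4 because
 * encode_cb_compound_hdr() below emits exactly four words: a zero tag
 * length, the minorversion, the callback ident, and the (backpatched)
 * operation count.  Likewise enc_nfs4_fh_sz is one word for the filehandle
 * length plus NFS4_FHSIZE/4 words for the padded filehandle data.
 */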

/*
 * Generic encode routines from fs/nfs/nfs4xdr.c
 */
static inline __be32 *
xdr_writemem(__be32 *p, const void *ptr, int nbytes)
{
	int tmp = XDR_QUADLEN(nbytes);
	if (!tmp)
		return p;
	p[tmp-1] = 0;
	memcpy(p, ptr, nbytes);
	return p + tmp;
}

#define WRITE32(n)		*p++ = htonl(n)
#define WRITEMEM(ptr,nbytes)	do {				\
	p = xdr_writemem(p, ptr, nbytes);			\
} while (0)
#define RESERVE_SPACE(nbytes)	do {				\
	p = xdr_reserve_space(xdr, nbytes);			\
	if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
	BUG_ON(!p);						\
} while (0)

/*
 * Generic decode routines from fs/nfs/nfs4xdr.c
 */
#define DECODE_TAIL				\
	status = 0;				\
out:						\
	return status;				\
xdr_error:					\
	dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
	status = -EIO;				\
	goto out

#define READ32(x)	(x) = ntohl(*p++)
#define READ64(x)	do {			\
	(x) = (u64)ntohl(*p++) << 32;		\
	(x) |= ntohl(*p++);			\
} while (0)
#define READTIME(x)	do {			\
	p++;					\
	(x.tv_sec) = ntohl(*p++);		\
	(x.tv_nsec) = ntohl(*p++);		\
} while (0)
#define READ_BUF(nbytes)	do {		\
	p = xdr_inline_decode(xdr, nbytes);	\
	if (!p) {				\
		dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
			__func__, __LINE__);	\
		return -EIO;			\
	}					\
} while (0)
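/*
 * Note that, as in fs/nfs/nfs4xdr.c, these helpers quietly rely on local
 * variables in the calling function: WRITE32/WRITEMEM and READ32/READ_BUF
 * all advance a local "__be32 *p" cursor, and RESERVE_SPACE/READ_BUF expect
 * a "struct xdr_stream *xdr" to be in scope.  Every encode and decode
 * routine below therefore declares its own "__be32 *p".
 */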

struct nfs4_cb_compound_hdr {
	/* args */
	u32		ident;	/* minorversion 0 only */
	u32		nops;
	__be32		*nops_p;
	u32		minorversion;
	/* res */
	int		status;
};

static struct {
	int stat;
	int errno;
} nfs_cb_errtbl[] = {
	{ NFS4_OK,		0            },
	{ NFS4ERR_PERM,		EPERM        },
	{ NFS4ERR_NOENT,	ENOENT       },
	{ NFS4ERR_IO,		EIO          },
	{ NFS4ERR_NXIO,		ENXIO        },
	{ NFS4ERR_ACCESS,	EACCES       },
	{ NFS4ERR_EXIST,	EEXIST       },
	{ NFS4ERR_XDEV,		EXDEV        },
	{ NFS4ERR_NOTDIR,	ENOTDIR      },
	{ NFS4ERR_ISDIR,	EISDIR       },
	{ NFS4ERR_INVAL,	EINVAL       },
	{ NFS4ERR_FBIG,		EFBIG        },
	{ NFS4ERR_NOSPC,	ENOSPC       },
	{ NFS4ERR_ROFS,		EROFS        },
	{ NFS4ERR_MLINK,	EMLINK       },
	{ NFS4ERR_NAMETOOLONG,	ENAMETOOLONG },
	{ NFS4ERR_NOTEMPTY,	ENOTEMPTY    },
	{ NFS4ERR_DQUOT,	EDQUOT       },
	{ NFS4ERR_STALE,	ESTALE       },
	{ NFS4ERR_BADHANDLE,	EBADHANDLE   },
	{ NFS4ERR_BAD_COOKIE,	EBADCOOKIE   },
	{ NFS4ERR_NOTSUPP,	ENOTSUPP     },
	{ NFS4ERR_TOOSMALL,	ETOOSMALL    },
	{ NFS4ERR_SERVERFAULT,	ESERVERFAULT },
	{ NFS4ERR_BADTYPE,	EBADTYPE     },
	{ NFS4ERR_LOCKED,	EAGAIN       },
	{ NFS4ERR_RESOURCE,	EREMOTEIO    },
	{ NFS4ERR_SYMLINK,	ELOOP        },
	{ NFS4ERR_OP_ILLEGAL,	EOPNOTSUPP   },
	{ NFS4ERR_DEADLOCK,	EDEADLK      },
	{ -1,			EIO          }
};

static int
nfs_cb_stat_to_errno(int stat)
{
	int i;
	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
		if (nfs_cb_errtbl[i].stat == stat)
			return nfs_cb_errtbl[i].errno;
	}
	/* If we cannot translate the error, the recovery routines should
	 * handle it.
	 * Note: remaining NFSv4 error codes have values > 10000, so should
	 * not conflict with native Linux error codes.
	 */
	return stat;
}

/*
 * XDR encode
 */

static void
encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
	__be32 *p;

	RESERVE_SPACE(sizeof(stateid_t));
	WRITE32(sid->si_generation);
	WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
}

static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	RESERVE_SPACE(16);
	WRITE32(0);		/* tag length is always 0 */
	WRITE32(hdr->minorversion);
	WRITE32(hdr->ident);
	hdr->nops_p = p;
	WRITE32(hdr->nops);
}

static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
	*hdr->nops_p = htonl(hdr->nops);
}
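/*
 * The operation count in the compound header is not known until the
 * individual operations have been encoded, so encode_cb_compound_hdr()
 * writes a placeholder and remembers its location in hdr->nops_p, each
 * encode_cb_*() routine bumps hdr->nops, and encode_cb_nops() backpatches
 * the final count just before the request is sent.
 */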

static void
encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
		struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	int len = dp->dl_fh.fh_size;

	RESERVE_SPACE(4);
	WRITE32(OP_CB_RECALL);
	encode_stateid(xdr, &dp->dl_stateid);
	RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
	WRITE32(0); /* truncate optimization not implemented */
	WRITE32(len);
	WRITEMEM(&dp->dl_fh.fh_base, len);
	hdr->nops++;
}

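/*
 * On an NFSv4.1 backchannel every CB_COMPOUND must begin with a
 * CB_SEQUENCE operation carrying the backchannel session and slot state;
 * NFSv4.0 has no sessions, so for minorversion 0 the sequence encode and
 * decode routines below are no-ops.
 */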
static void
encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
		   struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;

	if (hdr->minorversion == 0)
		return;

	RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);

	WRITE32(OP_CB_SEQUENCE);
	WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
	WRITE32(ses->se_cb_seq_nr);
	WRITE32(0);		/* slotid, always 0 */
	WRITE32(0);		/* highest slotid always 0 */
	WRITE32(0);		/* cachethis always 0 */
	WRITE32(0);		/* FIXME: support referring_call_lists */
	hdr->nops++;
}

static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
	struct xdr_stream xdrs, *xdr = &xdrs;

	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
	RESERVE_SPACE(0);
	return 0;
}

static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
		struct nfsd4_callback *cb)
{
	struct xdr_stream xdr;
	struct nfs4_delegation *args = cb->cb_op;
	struct nfs4_cb_compound_hdr hdr = {
		.ident = cb->cb_clp->cl_cb_ident,
		.minorversion = cb->cb_minorversion,
	};

	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
	encode_cb_compound_hdr(&xdr, &hdr);
	encode_cb_sequence(&xdr, cb, &hdr);
	encode_cb_recall(&xdr, args, &hdr);
	encode_cb_nops(&hdr);
	return 0;
}


static int
decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	u32 taglen;

	READ_BUF(8);
	READ32(hdr->status);
	/* We've got no use for the tag; ignore it: */
	READ32(taglen);
	READ_BUF(taglen + 4);
	p += XDR_QUADLEN(taglen);
	READ32(hdr->nops);
	return 0;
}

static int
decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
	__be32 *p;
	u32 op;
	int32_t nfserr;

	READ_BUF(8);
	READ32(op);
	if (op != expected) {
		dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
			"operation %d but we issued a request for %d\n",
			op, expected);
		return -EIO;
	}
	READ32(nfserr);
	if (nfserr != NFS_OK)
		return -nfs_cb_stat_to_errno(nfserr);
	return 0;
}

/*
 * Our current backchannel implementation supports a single backchannel
 * with a single slot.
 */
static int
decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
		   struct rpc_rqst *rqstp)
{
	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
	struct nfs4_sessionid id;
	int status;
	u32 dummy;
	__be32 *p;

	if (cb->cb_minorversion == 0)
		return 0;

	status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
	if (status)
		return status;

	/*
	 * If the server returns different values for sessionID, slotID or
	 * sequence number, the server is looney tunes.
	 */
	status = -ESERVERFAULT;

	READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
	memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
	if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
		dprintk("%s Invalid session id\n", __func__);
		goto out;
	}
	READ32(dummy);
	if (dummy != ses->se_cb_seq_nr) {
		dprintk("%s Invalid sequence number\n", __func__);
		goto out;
	}
	READ32(dummy); /* slotid must be 0 */
	if (dummy != 0) {
		dprintk("%s Invalid slotid\n", __func__);
		goto out;
	}
	/* FIXME: process highest slotid and target highest slotid */
	status = 0;
out:
	return status;
}


static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
	return 0;
}

static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
		struct nfsd4_callback *cb)
{
	struct xdr_stream xdr;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	status = decode_cb_compound_hdr(&xdr, &hdr);
	if (status)
		goto out;
	if (cb) {
		status = decode_cb_sequence(&xdr, cb, rqstp);
		if (status)
			goto out;
	}
	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
	return status;
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)				\
[NFSPROC4_CLNT_##proc] = {						\
	.p_proc   = NFSPROC4_CB_##call,					\
	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype,			\
	.p_decode = (kxdrproc_t) nfs4_xdr_##restype,			\
	.p_arglen = NFS4_##argtype##_sz,				\
	.p_replen = NFS4_##restype##_sz,				\
	.p_statidx = NFSPROC4_CB_##call,				\
	.p_name   = #proc,						\
}

static struct rpc_procinfo nfs4_cb_procedures[] = {
	PROC(CB_NULL,	NULL,		enc_cb_null,	dec_cb_null),
	PROC(CB_RECALL,	COMPOUND,	enc_cb_recall,	dec_cb_recall),
};

static struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
	.number			= 1,
	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
	.procs			= nfs4_cb_procedures
};

static struct rpc_version *nfs_cb_version[] = {
	&nfs_cb_version4,
};

static struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
	.program		= &cb_program
};

#define NFS4_CALLBACK 0x40000000
static struct rpc_program cb_program = {
	.name			= "nfs4_cb",
	.number			= NFS4_CALLBACK,
	.nrvers			= ARRAY_SIZE(nfs_cb_version),
	.version		= nfs_cb_version,
	.stats			= &cb_stats,
	.pipe_dir_name		= "/nfsd4_cb",
};

static int max_cb_time(void)
{
	return max(nfsd4_lease/10, (time_t)1) * HZ;
}
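/*
 * Callback RPCs are sent with to_retries = 0 and a timeout of one tenth of
 * the lease period (but at least one second), so a single attempt either
 * succeeds or fails quickly; presumably this keeps a broken callback path
 * from tying up the callback workqueue for anywhere near a full lease.
 */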

/* Reference counting, callback cleanup, etc., all look racy as heck.
 * And why is cl_cb_set an atomic? */

int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	struct rpc_timeout	timeparms = {
		.to_initval	= max_cb_time(),
		.to_retries	= 0,
	};
	struct rpc_create_args args = {
		.net		= &init_net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= (struct sockaddr *) &conn->cb_addr,
		.addrsize	= conn->cb_addrlen,
		.timeout	= &timeparms,
		.program	= &cb_program,
		.prognumber	= conn->cb_prog,
		.version	= 0,
		.authflavor	= clp->cl_flavor,
		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
		.client_name	= clp->cl_principal,
	};
	struct rpc_clnt *client;

	if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
		return -EINVAL;
	if (conn->cb_minorversion) {
		args.bc_xprt = conn->cb_xprt;
		args.prognumber = clp->cl_cb_session->se_cb_prog;
		args.protocol = XPRT_TRANSPORT_BC_TCP;
	}
	/* Create RPC client */
	client = rpc_create(&args);
	if (IS_ERR(client)) {
		dprintk("NFSD: couldn't create callback client: %ld\n",
			PTR_ERR(client));
		return PTR_ERR(client);
	}
	clp->cl_cb_ident = conn->cb_ident;
	clp->cl_cb_client = client;
	return 0;

}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
		(int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);

	if (task->tk_status)
		warn_no_callback_path(clp, task->tk_status);
	else
		atomic_set(&clp->cl_cb_set, 1);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
	/* XXX: release method to ensure we set the cb channel down if
	 * necessary on early failure? */
	.rpc_call_done = nfsd4_cb_probe_done,
};

static struct rpc_cred *callback_cred;

int set_callback_cred(void)
{
	if (callback_cred)
		return 0;
	callback_cred = rpc_lookup_machine_cred();
	if (!callback_cred)
		return -ENOMEM;
	return 0;
}

static struct workqueue_struct *callback_wq;

static void do_probe_callback(struct nfs4_client *clp)
{
	struct nfsd4_callback *cb = &clp->cl_cb_null;

	cb->cb_op = NULL;
	cb->cb_clp = clp;

	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
	cb->cb_msg.rpc_argp = NULL;
	cb->cb_msg.rpc_resp = NULL;
	cb->cb_msg.rpc_cred = callback_cred;

	cb->cb_ops = &nfsd4_cb_probe_ops;

	queue_work(callback_wq, &cb->cb_work);
}

/*
 * Poke the callback thread to process any updates to the callback
 * parameters, and send a null probe.
 */
void nfsd4_probe_callback(struct nfs4_client *clp)
{
	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
	do_probe_callback(clp);
}

void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	BUG_ON(atomic_read(&clp->cl_cb_set));

	spin_lock(&clp->cl_lock);
	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
	spin_unlock(&clp->cl_lock);
}

/*
 * There is currently a single callback channel slot.
 * If the slot is available, mark it busy.  Otherwise, put the thread to
 * sleep on the callback RPC wait queue.
 */
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
		struct rpc_task *task)
{
	u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
	int status = 0;

	dprintk("%s: %u:%u:%u:%u\n", __func__,
		ptr[0], ptr[1], ptr[2], ptr[3]);

	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		dprintk("%s slot is busy\n", __func__);
		status = -EAGAIN;
		goto out;
	}
out:
	dprintk("%s status=%d\n", __func__, status);
	return status;
}
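/*
 * A task that finds the slot busy is put to sleep on cl_cb_waitq, and the
 * -EAGAIN return makes nfsd4_cb_prepare() below bail out without starting
 * the call; nfsd4_cb_done() clears the busy bit and wakes the next waiter
 * once the outstanding callback completes.
 */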

/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;
	u32 minorversion = clp->cl_cb_conn.cb_minorversion;
	int status = 0;

	cb->cb_minorversion = minorversion;
	if (minorversion) {
		status = nfsd41_cb_setup_sequence(clp, task);
		if (status) {
			if (status != -EAGAIN) {
				/* terminate rpc task */
				task->tk_status = status;
				task->tk_action = NULL;
			}
			return;
		}
	}
	rpc_call_start(task);
}

static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;

	dprintk("%s: minorversion=%d\n", __func__,
		clp->cl_cb_conn.cb_minorversion);

	if (clp->cl_cb_conn.cb_minorversion) {
		/* No need for lock, access serialized in nfsd4_cb_prepare */
		++clp->cl_cb_session->se_cb_seq_nr;
		clear_bit(0, &clp->cl_cb_slot_busy);
		rpc_wake_up_next(&clp->cl_cb_waitq);
		dprintk("%s: freed slot, new seqid=%d\n", __func__,
			clp->cl_cb_session->se_cb_seq_nr);

		/* We're done looking into the sequence information */
		task->tk_msg.rpc_resp = NULL;
	}
}


static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;
	struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

	nfsd4_cb_done(task, calldata);

	if (current_rpc_client == NULL) {
		/* We're shutting down; give up. */
		/* XXX: err, or is it ok just to fall through
		 * and rpc_restart_call? */
		return;
	}

	switch (task->tk_status) {
	case 0:
		return;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/* Race: client probably got cb_recall
		 * before open reply granting delegation */
		break;
	default:
		/* Network partition? */
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
		if (current_rpc_client != task->tk_client) {
			/* queue a callback on the new connection: */
			atomic_inc(&dp->dl_count);
			nfsd4_cb_recall(dp);
			return;
		}
	}
	if (dp->dl_retries--) {
		rpc_delay(task, 2*HZ);
		task->tk_status = 0;
		rpc_restart_call_prepare(task);
		return;
	} else {
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
	}
}

static void nfsd4_cb_recall_release(void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);

	nfs4_put_delegation(dp);
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
	.rpc_call_prepare = nfsd4_cb_prepare,
	.rpc_call_done = nfsd4_cb_recall_done,
	.rpc_release = nfsd4_cb_recall_release,
};

int nfsd4_create_callback_queue(void)
{
	callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
	if (!callback_wq)
		return -ENOMEM;
	return 0;
}

void nfsd4_destroy_callback_queue(void)
{
	destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
	set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags);
	/*
	 * Note this won't actually result in a null callback;
	 * instead, nfsd4_do_callback_rpc() will detect the killed
	 * client, destroy the rpc client, and stop:
	 */
	do_probe_callback(clp);
	flush_workqueue(callback_wq);
}

void nfsd4_release_cb(struct nfsd4_callback *cb)
{
	if (cb->cb_ops->rpc_release)
		cb->cb_ops->rpc_release(cb);
}

void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
	struct nfs4_cb_conn conn;
	struct nfs4_client *clp = cb->cb_clp;
	int err;

	/*
	 * This is either an update, or the client dying; in either case,
	 * kill the old client:
	 */
	if (clp->cl_cb_client) {
		rpc_shutdown_client(clp->cl_cb_client);
		clp->cl_cb_client = NULL;
	}
	if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
		return;
	spin_lock(&clp->cl_lock);
	/*
	 * Only serialized callback code is allowed to clear these
	 * flags; main nfsd code can only set them:
	 */
	BUG_ON(!clp->cl_cb_flags);
	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
	spin_unlock(&clp->cl_lock);

	err = setup_callback_client(clp, &conn);
	if (err)
		warn_no_callback_path(clp, err);
}

void nfsd4_do_callback_rpc(struct work_struct *w)
{
	struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
	struct nfs4_client *clp = cb->cb_clp;
	struct rpc_clnt *clnt;

	if (clp->cl_cb_flags)
		nfsd4_process_cb_update(cb);

	clnt = clp->cl_cb_client;
	if (!clnt) {
		/* Callback channel broken, or client killed; give up: */
		nfsd4_release_cb(cb);
		return;
	}
	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
			cb->cb_ops, cb);
}

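/*
 * Callers (in nfs4state.c) are presumably expected to hold a reference on
 * the delegation (dl_count) for the duration of the recall:
 * nfsd4_cb_recall_done() takes an extra reference itself before re-queueing
 * a recall on a new connection, and nfsd4_cb_recall_release() drops the
 * reference via nfs4_put_delegation() when the rpc task is torn down.
 */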
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
	struct nfsd4_callback *cb = &dp->dl_recall;

	dp->dl_retries = 1;
	cb->cb_op = dp;
	cb->cb_clp = dp->dl_client;
	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
	cb->cb_msg.rpc_argp = cb;
	cb->cb_msg.rpc_resp = cb;
	cb->cb_msg.rpc_cred = callback_cred;

	cb->cb_ops = &nfsd4_cb_recall_ops;

	queue_work(callback_wq, &dp->dl_recall.cb_work);
}