/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

static unsigned __read_mostly afs_cell_gc_delay = 10;

static void afs_manage_cell(struct work_struct *);

static void afs_dec_cells_outstanding(struct afs_net *net)
{
        if (atomic_dec_and_test(&net->cells_outstanding))
                wake_up_var(&net->cells_outstanding);
}

/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
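        /* A pending timer already carries a count on cells_outstanding, so
         * release the count taken below if timer_reduce() reports that the
         * timer was already queued.
         */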
        if (net->live) {
                atomic_inc(&net->cells_outstanding);
                if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
                        afs_dec_cells_outstanding(net);
        }
}

/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
                                     const char *name, unsigned int namesz)
{
        struct afs_cell *cell = NULL;
        struct rb_node *p;
        int n, seq = 0, ret = 0;

        _enter("%*.*s", namesz, namesz, name);

        if (name && namesz == 0)
                return ERR_PTR(-EINVAL);
        if (namesz > AFS_MAXCELLNAME)
                return ERR_PTR(-ENAMETOOLONG);

        do {
                /* Unfortunately, rbtree walking doesn't give reliable results
                 * under just the RCU read lock, so we have to check for
                 * changes.
                 */
                if (cell)
                        afs_put_cell(net, cell);
                cell = NULL;
                ret = -ENOENT;

                read_seqbegin_or_lock(&net->cells_lock, &seq);

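                /* A NULL name means the pre-configured workstation cell, if
                 * one has been set.
                 */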
                if (!name) {
                        cell = rcu_dereference_raw(net->ws_cell);
                        if (cell) {
                                afs_get_cell(cell);
                                break;
                        }
                        ret = -EDESTADDRREQ;
                        continue;
                }

                p = rcu_dereference_raw(net->cells.rb_node);
                while (p) {
                        cell = rb_entry(p, struct afs_cell, net_node);

                        n = strncasecmp(cell->name, name,
                                        min_t(size_t, cell->name_len, namesz));
                        if (n == 0)
                                n = cell->name_len - namesz;
                        if (n < 0) {
                                p = rcu_dereference_raw(p->rb_left);
                        } else if (n > 0) {
                                p = rcu_dereference_raw(p->rb_right);
                        } else {
                                if (atomic_inc_not_zero(&cell->usage)) {
                                        ret = 0;
                                        break;
                                }
                                /* We want to repeat the search, this time with
                                 * the lock properly locked.
                                 */
                        }
                        cell = NULL;
                }

        } while (need_seqretry(&net->cells_lock, seq));

        done_seqretry(&net->cells_lock, seq);

        return ret == 0 ? cell : ERR_PTR(ret);
}

/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
                                       const char *name, unsigned int namelen,
                                       const char *vllist)
{
        struct afs_cell *cell;
        int i, ret;

        ASSERT(name);
        if (namelen == 0)
                return ERR_PTR(-EINVAL);
        if (namelen > AFS_MAXCELLNAME) {
                _leave(" = -ENAMETOOLONG");
                return ERR_PTR(-ENAMETOOLONG);
        }
        if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
                return ERR_PTR(-EINVAL);

        _enter("%*.*s,%s", namelen, namelen, name, vllist);

        cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
        if (!cell) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        cell->net = net;
        cell->name_len = namelen;
        for (i = 0; i < namelen; i++)
                cell->name[i] = tolower(name[i]);

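        /* Two refs: one is returned to the caller of afs_lookup_cell() and one
         * covers the cell's presence in net->cells once it has been inserted
         * (dropped by the cell manager when it erases the cell).
         */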
        atomic_set(&cell->usage, 2);
        INIT_WORK(&cell->manager, afs_manage_cell);
        cell->flags = ((1 << AFS_CELL_FL_NOT_READY) |
                       (1 << AFS_CELL_FL_NO_LOOKUP_YET));
        INIT_LIST_HEAD(&cell->proc_volumes);
        rwlock_init(&cell->proc_lock);
        rwlock_init(&cell->vl_addrs_lock);

        /* Fill in the VL server list if we were given a list of addresses to
         * use.
         */
        if (vllist) {
                struct afs_addr_list *alist;

                alist = afs_parse_text_addrs(vllist, strlen(vllist), ':',
                                             VL_SERVICE, AFS_VL_PORT);
                if (IS_ERR(alist)) {
                        ret = PTR_ERR(alist);
                        goto parse_failed;
                }

                rcu_assign_pointer(cell->vl_addrs, alist);
                cell->dns_expiry = TIME64_MAX;
        }

        _leave(" = %p", cell);
        return cell;

parse_failed:
        if (ret == -EINVAL)
                printk(KERN_ERR "kAFS: bad VL server IP address\n");
        kfree(cell);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net: The network namespace
 * @name: The name of the cell.
 * @namesz: The strlen of the cell name.
 * @vllist: A colon/comma separated list of numeric IP addresses or NULL.
 * @excl: T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
                                 const char *name, unsigned int namesz,
                                 const char *vllist, bool excl)
{
        struct afs_cell *cell, *candidate, *cursor;
        struct rb_node *parent, **pp;
        int ret, n;

        _enter("%s,%s", name, vllist);

        if (!excl) {
                rcu_read_lock();
                cell = afs_lookup_cell_rcu(net, name, namesz);
                rcu_read_unlock();
                if (!IS_ERR(cell))
                        goto wait_for_cell;
        }

        /* Assume we're probably going to create a cell and preallocate and
         * mostly set up a candidate record.  We can then use this to stash the
         * name, the net namespace and VL server addresses.
         *
         * We also want to do this before we hold any locks as it may involve
         * upcalling to userspace to make DNS queries.
         */
        candidate = afs_alloc_cell(net, name, namesz, vllist);
        if (IS_ERR(candidate)) {
                _leave(" = %ld", PTR_ERR(candidate));
                return candidate;
        }

        /* Find the insertion point and check to see if someone else added a
         * cell whilst we were allocating.
         */
        write_seqlock(&net->cells_lock);

        pp = &net->cells.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                cursor = rb_entry(parent, struct afs_cell, net_node);

                n = strncasecmp(cursor->name, name,
                                min_t(size_t, cursor->name_len, namesz));
                if (n == 0)
                        n = cursor->name_len - namesz;
                if (n < 0)
                        pp = &(*pp)->rb_left;
                else if (n > 0)
                        pp = &(*pp)->rb_right;
                else
                        goto cell_already_exists;
        }

        cell = candidate;
        candidate = NULL;
        rb_link_node_rcu(&cell->net_node, parent, pp);
        rb_insert_color(&cell->net_node, &net->cells);
        atomic_inc(&net->cells_outstanding);
        write_sequnlock(&net->cells_lock);

        queue_work(afs_wq, &cell->manager);

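        /* Wait for the cell manager to bring the cell to the ACTIVE or FAILED
         * state (the wait is interruptible).
         */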
wait_for_cell:
        _debug("wait_for_cell");
        ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NOT_READY, TASK_INTERRUPTIBLE);
        smp_rmb();

        switch (READ_ONCE(cell->state)) {
        case AFS_CELL_FAILED:
                ret = cell->error;
                goto error;
        default:
                _debug("weird %u %d", cell->state, cell->error);
                goto error;
        case AFS_CELL_ACTIVE:
                break;
        }

        _leave(" = %p [cell]", cell);
        return cell;

cell_already_exists:
        _debug("cell exists");
        cell = cursor;
        if (excl) {
                ret = -EEXIST;
        } else {
                afs_get_cell(cursor);
                ret = 0;
        }
        write_sequnlock(&net->cells_lock);
        kfree(candidate);
        if (ret == 0)
                goto wait_for_cell;
        goto error_noput;
error:
        afs_put_cell(net, cell);
error_noput:
        _leave(" = %d [error]", ret);
        return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
        struct afs_cell *old_root, *new_root;
        const char *cp, *vllist;
        size_t len;

        _enter("");

        if (!rootcell) {
                /* module is loaded with no parameters, or built statically.
                 * - in the future we might initialize cell DB here.
                 */
                _leave(" = 0 [no root]");
                return 0;
        }

        cp = strchr(rootcell, ':');
        if (!cp) {
                _debug("kAFS: no VL server IP addresses specified");
                vllist = NULL;
                len = strlen(rootcell);
        } else {
                vllist = cp + 1;
                len = cp - rootcell;
        }

        /* allocate a cell record for the root cell */
        new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
        if (IS_ERR(new_root)) {
                _leave(" = %ld", PTR_ERR(new_root));
                return PTR_ERR(new_root);
        }

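        /* Pin the root cell so that the garbage collector doesn't discard it;
         * the extra reference is only taken the first time the NO_GC flag is
         * set.
         */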
        if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
                afs_get_cell(new_root);

        /* install the new cell */
        write_seqlock(&net->cells_lock);
        old_root = rcu_access_pointer(net->ws_cell);
        rcu_assign_pointer(net->ws_cell, new_root);
        write_sequnlock(&net->cells_lock);

        afs_put_cell(net, old_root);
        _leave(" = 0");
        return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
 */
static void afs_update_cell(struct afs_cell *cell)
{
        struct afs_addr_list *alist, *old;
        time64_t now, expiry;

        _enter("%s", cell->name);

        alist = afs_dns_query(cell, &expiry);
        if (IS_ERR(alist)) {
                switch (PTR_ERR(alist)) {
                case -ENODATA:
                        /* The DNS said that the cell does not exist */
                        set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
                        clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
                        cell->dns_expiry = ktime_get_real_seconds() + 61;
                        break;

                case -EAGAIN:
                case -ECONNREFUSED:
                default:
                        set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
                        cell->dns_expiry = ktime_get_real_seconds() + 10;
                        break;
                }

                cell->error = -EDESTADDRREQ;
        } else {
                clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
                clear_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);

                /* Exclusion on changing vl_addrs is achieved by a
                 * non-reentrant work item.
                 */
                old = rcu_dereference_protected(cell->vl_addrs, true);
                rcu_assign_pointer(cell->vl_addrs, alist);
                cell->dns_expiry = expiry;

                if (old)
                        afs_put_addrlist(old);
        }

        if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags))
                wake_up_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET);

        now = ktime_get_real_seconds();
        afs_set_cell_timer(cell->net, cell->dns_expiry - now);
        _leave("");
}

/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
        struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

        _enter("%p{%s}", cell, cell->name);

        ASSERTCMP(atomic_read(&cell->usage), ==, 0);

        afs_put_addrlist(rcu_access_pointer(cell->vl_addrs));
        key_put(cell->anonymous_key);
        kfree(cell);

        _leave(" [destroyed]");
}

/*
 * Queue the cell manager.
 */
static void afs_queue_cell_manager(struct afs_net *net)
{
        int outstanding = atomic_inc_return(&net->cells_outstanding);

        _enter("%d", outstanding);

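        /* The count taken above is passed along to the work item; if the
         * manager was already queued, release it again.
         */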
        if (!queue_work(afs_wq, &net->cells_manager))
                afs_dec_cells_outstanding(net);
}

/*
 * Cell management timer.  We have an increment on cells_outstanding that we
 * need to pass along to the work item.
 */
void afs_cells_timer(struct timer_list *timer)
{
        struct afs_net *net = container_of(timer, struct afs_net, cells_timer);

        _enter("");
        if (!queue_work(afs_wq, &net->cells_manager))
                afs_dec_cells_outstanding(net);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
        atomic_inc(&cell->usage);
        return cell;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
        time64_t now, expire_delay;

        if (!cell)
                return;

        _enter("%s", cell->name);

        now = ktime_get_real_seconds();
        cell->last_inactive = now;
        expire_delay = 0;
        if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
            !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
                expire_delay = afs_cell_gc_delay;

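        /* Once only the tree's reference remains, arm the management timer so
         * that the cell can be considered for expiry after the GC delay.
         */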
        if (atomic_dec_return(&cell->usage) > 1)
                return;

        /* 'cell' may now be garbage collected. */
        afs_set_cell_timer(net, expire_delay);
}

/*
 * Allocate a key to use as a placeholder for anonymous user security.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
        struct key *key;
        char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

        /* Create a key to represent an anonymous user. */
        memcpy(keyname, "afs@", 4);
        dp = keyname + 4;
        cp = cell->name;
        do {
                *dp++ = tolower(*cp);
        } while (*cp++);

        key = rxrpc_get_null_key(keyname);
        if (IS_ERR(key))
                return PTR_ERR(key);

        cell->anonymous_key = key;

        _debug("anon key %p{%x}",
               cell->anonymous_key, key_serial(cell->anonymous_key));
        return 0;
}

/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
        int ret;

        if (!cell->anonymous_key) {
                ret = afs_alloc_anon_key(cell);
                if (ret < 0)
                        return ret;
        }

#ifdef CONFIG_AFS_FSCACHE
        cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
                                             &afs_cell_cache_index_def,
                                             cell->name, strlen(cell->name),
                                             NULL, 0,
                                             cell, 0, true);
#endif
        ret = afs_proc_cell_setup(net, cell);
        if (ret < 0)
                return ret;
        spin_lock(&net->proc_cells_lock);
        list_add_tail(&cell->proc_link, &net->proc_cells);
        spin_unlock(&net->proc_cells_lock);
        return 0;
}

/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
        _enter("%s", cell->name);

        afs_proc_cell_remove(net, cell);

        spin_lock(&net->proc_cells_lock);
        list_del_init(&cell->proc_link);
        spin_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
        fscache_relinquish_cookie(cell->cache, NULL, false);
        cell->cache = NULL;
#endif

        _leave("");
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
static void afs_manage_cell(struct work_struct *work)
{
        struct afs_cell *cell = container_of(work, struct afs_cell, manager);
        struct afs_net *net = cell->net;
        bool deleted;
        int ret, usage;

        _enter("%s", cell->name);

again:
        _debug("state %u", cell->state);
        switch (cell->state) {
        case AFS_CELL_INACTIVE:
        case AFS_CELL_FAILED:
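                /* If the only remaining reference is the one held for the
                 * cell's place in the tree, drop the count to zero, unlink the
                 * cell and destroy it; otherwise someone re-acquired it whilst
                 * we weren't looking.
                 */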
                write_seqlock(&net->cells_lock);
                usage = 1;
                deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
                if (deleted)
                        rb_erase(&cell->net_node, &net->cells);
                write_sequnlock(&net->cells_lock);
                if (deleted)
                        goto final_destruction;
                if (cell->state == AFS_CELL_FAILED)
                        goto done;
                cell->state = AFS_CELL_UNSET;
                goto again;

        case AFS_CELL_UNSET:
                cell->state = AFS_CELL_ACTIVATING;
                goto again;

        case AFS_CELL_ACTIVATING:
                ret = afs_activate_cell(net, cell);
                if (ret < 0)
                        goto activation_failed;

                cell->state = AFS_CELL_ACTIVE;
                smp_wmb();
                clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
                wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
                goto again;

        case AFS_CELL_ACTIVE:
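                /* Whilst the cell is still in use, just refresh its DNS
                 * records as they expire; once only the tree holds it, begin
                 * deactivation.
                 */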
                if (atomic_read(&cell->usage) > 1) {
                        time64_t now = ktime_get_real_seconds();
                        if (cell->dns_expiry <= now && net->live)
                                afs_update_cell(cell);
                        goto done;
                }
                cell->state = AFS_CELL_DEACTIVATING;
                goto again;

        case AFS_CELL_DEACTIVATING:
                set_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
                if (atomic_read(&cell->usage) > 1)
                        goto reverse_deactivation;
                afs_deactivate_cell(net, cell);
                cell->state = AFS_CELL_INACTIVE;
                goto again;

        default:
                break;
        }
        _debug("bad state %u", cell->state);
        BUG(); /* Unhandled state */

activation_failed:
        cell->error = ret;
        afs_deactivate_cell(net, cell);

        cell->state = AFS_CELL_FAILED;
        smp_wmb();
        if (test_and_clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags))
                wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
        goto again;

reverse_deactivation:
        cell->state = AFS_CELL_ACTIVE;
        smp_wmb();
        clear_bit(AFS_CELL_FL_NOT_READY, &cell->flags);
        wake_up_bit(&cell->flags, AFS_CELL_FL_NOT_READY);
        _leave(" [deact->act]");
        return;

done:
        _leave(" [done %u]", cell->state);
        return;

final_destruction:
        call_rcu(&cell->rcu, afs_cell_destroy);
        afs_dec_cells_outstanding(net);
        _leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}

/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to the caller (provided it skips cells that
 * are still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
        struct afs_net *net = container_of(work, struct afs_net, cells_manager);
        struct rb_node *cursor;
        time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
        bool purging = !net->live;

        _enter("");

        /* Trawl the cell database looking for cells that have expired from
         * lack of use and cells whose DNS results have expired and dispatch
         * their managers.
         */
        read_seqlock_excl(&net->cells_lock);

        for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
                struct afs_cell *cell =
                        rb_entry(cursor, struct afs_cell, net_node);
                unsigned usage;
                bool sched_cell = false;

                usage = atomic_read(&cell->usage);
                _debug("manage %s %u", cell->name, usage);

                ASSERTCMP(usage, >=, 1);

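                /* When purging, drop the pin taken on cells flagged NO_GC
                 * (such as the root cell) so that every cell can now be
                 * expired.
                 */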
                if (purging) {
                        if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
                                usage = atomic_dec_return(&cell->usage);
                        ASSERTCMP(usage, ==, 1);
                }

                if (usage == 1) {
                        time64_t expire_at = cell->last_inactive;

                        if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
                            !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
                                expire_at += afs_cell_gc_delay;
                        if (purging || expire_at <= now)
                                sched_cell = true;
                        else if (expire_at < next_manage)
                                next_manage = expire_at;
                }

                if (!purging) {
                        if (cell->dns_expiry <= now)
                                sched_cell = true;
                        else if (cell->dns_expiry <= next_manage)
                                next_manage = cell->dns_expiry;
                }

                if (sched_cell)
                        queue_work(afs_wq, &cell->manager);
        }

        read_sequnlock_excl(&net->cells_lock);

        /* Update the timer on the way out.  We have to pass an increment on
         * cells_outstanding in the namespace that we are in to the timer or
         * the work scheduler.
         */
        if (!purging && next_manage < TIME64_MAX) {
                now = ktime_get_real_seconds();

                if (next_manage - now <= 0) {
                        if (queue_work(afs_wq, &net->cells_manager))
                                atomic_inc(&net->cells_outstanding);
                } else {
                        afs_set_cell_timer(net, next_manage - now);
                }
        }

        afs_dec_cells_outstanding(net);
        _leave(" [%d]", atomic_read(&net->cells_outstanding));
}

/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
        struct afs_cell *ws;

        _enter("");

        write_seqlock(&net->cells_lock);
        ws = rcu_access_pointer(net->ws_cell);
        RCU_INIT_POINTER(net->ws_cell, NULL);
        write_sequnlock(&net->cells_lock);
        afs_put_cell(net, ws);

        _debug("del timer");
        if (del_timer_sync(&net->cells_timer))
                atomic_dec(&net->cells_outstanding);

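        /* Kick the cell manager so that it expires and tears down the
         * remaining cells, then wait for all outstanding cell work to finish.
         */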
        _debug("kick mgr");
        afs_queue_cell_manager(net);

        _debug("wait");
        wait_var_event(&net->cells_outstanding,
                       !atomic_read(&net->cells_outstanding));
        _leave("");
}