blob: 3c90807476eb0e293a85940daff55c30bf636169 [file] [log] [blame]
Thomas Gleixnerb4d0d232019-05-20 19:08:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
David Howells5d135442009-09-02 09:14:00 +01002/* Key garbage collector
3 *
David Howells0c061b52011-08-22 14:09:36 +01004 * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
David Howells5d135442009-09-02 09:14:00 +01005 * Written by David Howells (dhowells@redhat.com)
David Howells5d135442009-09-02 09:14:00 +01006 */
7
David Howells8bc16de2011-08-22 14:09:11 +01008#include <linux/slab.h>
9#include <linux/security.h>
David Howells5d135442009-09-02 09:14:00 +010010#include <keys/keyring-type.h>
11#include "internal.h"
12
/*
 * Delay between key revocation/expiry and actual destruction, in seconds.
 * A key is only reaped once it has been dead for at least this long.
 */
unsigned key_gc_delay = 5 * 60;

/*
 * Reaper for unused keys.
 */
static void key_garbage_collector(struct work_struct *work);
DECLARE_WORK(key_gc_work, key_garbage_collector);

/*
 * Reaper for links from keyrings to dead keys.
 */
static void key_gc_timer_func(struct timer_list *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func);

/* Time of the next scheduled GC run; TIME64_MAX means "none scheduled". */
static time64_t key_gc_next_run = TIME64_MAX;
/* Key type currently being torn down by key_gc_keytype(), or NULL. */
static struct key_type *key_gc_dead_keytype;

/* Bit flags requesting work from the garbage collector. */
static unsigned long key_gc_flags;
#define KEY_GC_KEY_EXPIRED	0	/* A key expired and needs unlinking */
#define KEY_GC_REAP_KEYTYPE	1	/* A keytype is being unregistered */
#define KEY_GC_REAPING_KEYTYPE	2	/* Cleared when keytype reaped */


/*
 * Any key whose type gets unregistered will be re-typed to this if it can't be
 * immediately unlinked.
 */
struct key_type key_type_dead = {
	.name = ".dead",
};
David Howells5d135442009-09-02 09:14:00 +010046
47/*
David Howells973c9f42011-01-20 16:38:33 +000048 * Schedule a garbage collection run.
49 * - time precision isn't particularly important
David Howells5d135442009-09-02 09:14:00 +010050 */
Baolin Wang074d5892017-11-15 16:38:45 +000051void key_schedule_gc(time64_t gc_at)
David Howells5d135442009-09-02 09:14:00 +010052{
53 unsigned long expires;
Baolin Wang074d5892017-11-15 16:38:45 +000054 time64_t now = ktime_get_real_seconds();
David Howells5d135442009-09-02 09:14:00 +010055
Baolin Wang074d5892017-11-15 16:38:45 +000056 kenter("%lld", gc_at - now);
David Howells5d135442009-09-02 09:14:00 +010057
David Howells0c061b52011-08-22 14:09:36 +010058 if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
59 kdebug("IMMEDIATE");
Tejun Heo3b07e9c2012-08-20 14:51:24 -070060 schedule_work(&key_gc_work);
David Howells5d135442009-09-02 09:14:00 +010061 } else if (gc_at < key_gc_next_run) {
David Howells0c061b52011-08-22 14:09:36 +010062 kdebug("DEFERRED");
63 key_gc_next_run = gc_at;
David Howells5d135442009-09-02 09:14:00 +010064 expires = jiffies + (gc_at - now) * HZ;
65 mod_timer(&key_gc_timer, expires);
66 }
67}
68
/*
 * Schedule a dead links collection run.
 *
 * Flags that at least one key has expired, then kicks the workqueue item so
 * the collector sweeps keyrings for links to expired/dead keys.  The flag
 * must be set before the work is queued so the pass sees it.
 */
void key_schedule_gc_links(void)
{
	set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
	schedule_work(&key_gc_work);
}
77
/*
 * Some key's cleanup time was met after it expired, so we need to get the
 * reaper to go through a cycle finding expired keys.
 */
static void key_gc_timer_func(struct timer_list *unused)
{
	kenter("");
	/* The programmed run is now happening; reset so key_schedule_gc()
	 * can arm the timer for the next-earliest expiry it finds.
	 */
	key_gc_next_run = TIME64_MAX;
	key_schedule_gc_links();
}
88
/*
 * Reap keys of dead type.
 *
 * We use three flags to make sure we see three complete cycles of the garbage
 * collector: the first to mark keys of that type as being dead, the second to
 * collect dead links and the third to clean up the dead keys.  We have to be
 * careful as there may already be a cycle in progress.
 *
 * The caller must be holding key_types_sem.
 */
void key_gc_keytype(struct key_type *ktype)
{
	kenter("%s", ktype->name);

	key_gc_dead_keytype = ktype;
	set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
	/* Make sure REAPING_KEYTYPE (which we will sleep on below) is visible
	 * before REAP_KEYTYPE triggers the collector, so the collector's
	 * final wake_up_bit() cannot be missed.
	 */
	smp_mb();
	set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);

	kdebug("schedule");
	schedule_work(&key_gc_work);

	/* Sleep until the collector's third pass clears REAPING_KEYTYPE,
	 * meaning no keys of this type remain.
	 */
	kdebug("sleep");
	wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE,
		    TASK_UNINTERRUPTIBLE);

	key_gc_dead_keytype = NULL;
	kleave("");
}
118
/*
 * Garbage collect a list of unreferenced, detached keys
 *
 * Each key on @keys has a zero refcount and has already been removed from the
 * serial tree, so nothing else can reach it.  The teardown order below is
 * deliberate: payload destruction, then LSM state, then quota/refcount
 * accounting, then the final wipe and free.
 */
static noinline void key_gc_unused_keys(struct list_head *keys)
{
	while (!list_empty(keys)) {
		struct key *key =
			list_entry(keys->next, struct key, graveyard_link);
		/* Snapshot the instantiation state once; it's consulted twice
		 * below.
		 */
		short state = key->state;

		list_del(&key->graveyard_link);

		kdebug("- %u", key->serial);
		key_check(key);

#ifdef CONFIG_KEY_NOTIFICATIONS
		remove_watch_list(key->watchers, key->serial);
		key->watchers = NULL;
#endif

		/* Throw away the key data if the key is instantiated */
		if (state == KEY_IS_POSITIVE && key->type->destroy)
			key->type->destroy(key);

		security_key_free(key);

		/* deal with the user's key tracking and quota */
		if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
			spin_lock(&key->user->lock);
			key->user->qnkeys--;
			key->user->qnbytes -= key->quotalen;
			spin_unlock(&key->user->lock);
		}

		atomic_dec(&key->user->nkeys);
		/* Both positive and negative keys count as instantiated. */
		if (state != KEY_IS_UNINSTANTIATED)
			atomic_dec(&key->user->nikeys);

		key_user_put(key->user);
		key_put_tag(key->domain_tag);
		kfree(key->description);

		/* memzero_explicit() so the wipe of (possibly secret) key
		 * material can't be optimised away before the free.
		 */
		memzero_explicit(key, sizeof(*key));
		kmem_cache_free(key_jar, key);
	}
}
David Howells8bc16de2011-08-22 14:09:11 +0100165
/*
 * Garbage collector for unused keys.
 *
 * This is done in process context so that we don't have to disable interrupts
 * all over the place.  key_put() schedules this rather than trying to do the
 * cleanup itself, which means key_put() doesn't have to sleep.
 */
static void key_garbage_collector(struct work_struct *work)
{
	static LIST_HEAD(graveyard);
	static u8 gc_state;			/* Internal persistent state */
#define KEY_GC_REAP_AGAIN	0x01	/* - Need another cycle */
#define KEY_GC_REAPING_LINKS	0x02	/* - We need to reap links */
#define KEY_GC_SET_TIMER	0x04	/* - We need to restart the timer */
#define KEY_GC_REAPING_DEAD_1	0x10	/* - We need to mark dead keys */
#define KEY_GC_REAPING_DEAD_2	0x20	/* - We need to reap dead key links */
#define KEY_GC_REAPING_DEAD_3	0x40	/* - We need to reap dead keys */
#define KEY_GC_FOUND_DEAD_KEY	0x80	/* - We found at least one dead key */

	struct rb_node *cursor;
	struct key *key;
	time64_t new_timer, limit;

	kenter("[%lx,%x]", key_gc_flags, gc_state);

	/* Keys that expired before 'limit' are at least key_gc_delay old and
	 * may be reaped now.
	 */
	limit = ktime_get_real_seconds();
	if (limit > key_gc_delay)
		limit -= key_gc_delay;
	else
		limit = key_gc_delay;

	/* Work out what we're going to be doing in this pass.  The DEAD_1/2
	 * bits carried over from the previous pass are promoted one stage
	 * (mark -> reap links -> reap keys) by the shift.
	 */
	gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
	gc_state <<= 1;
	if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;

	if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
		gc_state |= KEY_GC_REAPING_DEAD_1;
	kdebug("new pass %x", gc_state);

	new_timer = TIME64_MAX;

	/* As only this function is permitted to remove things from the key
	 * serial tree, if cursor is non-NULL then it will always point to a
	 * valid node in the tree - even if lock got dropped.
	 */
	spin_lock(&key_serial_lock);
	cursor = rb_first(&key_serial_tree);

continue_scanning:
	while (cursor) {
		key = rb_entry(cursor, struct key, serial_node);
		cursor = rb_next(cursor);

		if (refcount_read(&key->usage) == 0)
			goto found_unreferenced_key;

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
			if (key->type == key_gc_dead_keytype) {
				gc_state |= KEY_GC_FOUND_DEAD_KEY;
				set_bit(KEY_FLAG_DEAD, &key->flags);
				key->perm = 0;
				goto skip_dead_key;
			} else if (key->type == &key_type_keyring &&
				   key->restrict_link) {
				goto found_restricted_keyring;
			}
		}

		/* Track the earliest expiry later than 'limit' so the timer
		 * can be re-armed for it at the end of the pass.
		 */
		if (gc_state & KEY_GC_SET_TIMER) {
			if (key->expiry > limit && key->expiry < new_timer) {
				kdebug("will expire %x in %lld",
				       key_serial(key), key->expiry - limit);
				new_timer = key->expiry;
			}
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
			if (key->type == key_gc_dead_keytype)
				gc_state |= KEY_GC_FOUND_DEAD_KEY;

		if ((gc_state & KEY_GC_REAPING_LINKS) ||
		    unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
			if (key->type == &key_type_keyring)
				goto found_keyring;
		}

		if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
			if (key->type == key_gc_dead_keytype)
				goto destroy_dead_key;

	skip_dead_key:
		/* Drop the lock periodically so we don't hog it. */
		if (spin_is_contended(&key_serial_lock) || need_resched())
			goto contended;
	}

contended:
	spin_unlock(&key_serial_lock);

maybe_resched:
	/* Non-NULL cursor means the scan was interrupted mid-tree; resume
	 * from where we left off (the node is still valid, see above).
	 */
	if (cursor) {
		cond_resched();
		spin_lock(&key_serial_lock);
		goto continue_scanning;
	}

	/* We've completed the pass.  Set the timer if we need to and queue a
	 * new cycle if necessary.  We keep executing cycles until we find one
	 * where we didn't reap any keys.
	 */
	kdebug("pass complete");

	if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) {
		new_timer += key_gc_delay;
		key_schedule_gc(new_timer);
	}

	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
	    !list_empty(&graveyard)) {
		/* Make sure that all pending keyring payload destructions are
		 * fulfilled and that people aren't now looking at dead or
		 * dying keys that they don't have a reference upon or a link
		 * to.
		 */
		kdebug("gc sync");
		synchronize_rcu();
	}

	if (!list_empty(&graveyard)) {
		kdebug("gc keys");
		key_gc_unused_keys(&graveyard);
	}

	if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
				 KEY_GC_REAPING_DEAD_2))) {
		if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
			/* No remaining dead keys: short circuit the remaining
			 * keytype reap cycles.
			 */
			kdebug("dead short");
			gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
			gc_state |= KEY_GC_REAPING_DEAD_3;
		} else {
			gc_state |= KEY_GC_REAP_AGAIN;
		}
	}

	/* Third dead-keytype cycle done: wake key_gc_keytype(), which is
	 * sleeping on KEY_GC_REAPING_KEYTYPE.  The barrier pairs with the
	 * one in key_gc_keytype().
	 */
	if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
		kdebug("dead wake");
		smp_mb();
		clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
		wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
	}

	if (gc_state & KEY_GC_REAP_AGAIN)
		schedule_work(&key_gc_work);
	kleave(" [end %x]", gc_state);
	return;

	/* We found an unreferenced key - once we've removed it from the tree,
	 * we can safely drop the lock.
	 */
found_unreferenced_key:
	kdebug("unrefd key %d", key->serial);
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	list_add_tail(&key->graveyard_link, &graveyard);
	gc_state |= KEY_GC_REAP_AGAIN;
	goto maybe_resched;

	/* We found a restricted keyring and need to update the restriction if
	 * it is associated with the dead key type.
	 */
found_restricted_keyring:
	spin_unlock(&key_serial_lock);
	keyring_restriction_gc(key, key_gc_dead_keytype);
	goto maybe_resched;

	/* We found a keyring and we need to check the payload for links to
	 * dead or expired keys.  We don't flag another reap immediately as we
	 * have to wait for the old payload to be destroyed by RCU before we
	 * can reap the keys to which it refers.
	 */
found_keyring:
	spin_unlock(&key_serial_lock);
	keyring_gc(key, limit);
	goto maybe_resched;

	/* We found a dead key that is still referenced.  Reset its type and
	 * destroy its payload with its semaphore held.
	 */
destroy_dead_key:
	spin_unlock(&key_serial_lock);
	kdebug("destroy key %d", key->serial);
	down_write(&key->sem);
	key->type = &key_type_dead;
	if (key_gc_dead_keytype->destroy)
		key_gc_dead_keytype->destroy(key);
	memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
	up_write(&key->sem);
	goto maybe_resched;
}
David Howells8bc16de2011-08-22 14:09:11 +0100369}