// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

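/*
 * A list_lru spreads its items over one list per NUMA node and, when
 * CONFIG_MEMCG_KMEM is enabled, over one list per memory cgroup within
 * each node. Every per-node list is protected by its own spinlock
 * (nlru->lock), so operations on different nodes never contend.
 */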
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This relies on node 0 always being present, even on
	 * systems supporting sparse numa ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either the lock or RCU protects the array of per-cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

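/*
 * Add @item to the lru list for its NUMA node, derived from the item's
 * address via virt_to_page(). On a memcg-aware lru the item lands on
 * the list of the memory cgroup that owns it. Returns true if @item
 * was added, false if it was already on a list.
 */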
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

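/*
 * The converse of list_lru_add(): remove @item under the same per-node
 * lock. Returns true if @item was removed, false if it was not on a
 * list to begin with.
 */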
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

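/*
 * The isolate helpers below are intended for use from a walk callback,
 * where the node lock is already held; hence they only adjust the
 * per-list item count and take no lock of their own.
 */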
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

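/*
 * The counts below are racy snapshots: list_lru_count_one() reads
 * l->nr_items under rcu_read_lock() only (enough to keep the memcg
 * array alive), and list_lru_count_node() reads the per-node total
 * with no locking at all.
 */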
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

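/*
 * Core of the walk machinery. The caller must hold nlru->lock. The
 * isolate callback may drop that lock (signalled by LRU_REMOVED_RETRY
 * or LRU_RETRY), in which case the traversal restarts from the head of
 * the list.
 */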
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * Decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items.
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

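/*
 * Walk the node's global list first, then each memcg list on that
 * node, stopping once @nr_to_walk items have been scanned.
 */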
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* Slots [begin, i) were allocated before the failure; free exactly those. */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered,
	 * so nobody can be using the array anymore; there is no need for
	 * kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

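/*
 * Grow the per-memcg array from @old_size to @new_size slots: allocate
 * a new array, initialize only the new tail, copy the old pointers
 * over, publish the new array with rcu_assign_pointer() under the node
 * lock, and free the old one after a grace period.
 */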
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

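/*
 * Resize the memcg array of every registered lru to @new_size, which
 * may only grow (see the BUG_ON in memcg_update_list_lru_node),
 * rolling all of them back on failure.
 */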
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

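/*
 * When a memory cgroup goes offline, its items are reparented: on each
 * node, the list of the dying cgroup (@src_idx) is spliced onto the
 * list of @dst_memcg under the node lock.
 */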
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	set = (!dst->nr_items && src->nr_items);
	dst->nr_items += src->nr_items;
	if (set)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

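/*
 * Initialize @lru: one list_lru_node per possible NUMA node, plus the
 * per-memcg arrays when @memcg_aware. @key lets callers give the node
 * locks their own lockdep class; @shrinker, when supplied, ties the
 * lru to a shrinker id so the memcg shrinker bitmap can be set when
 * the first item is added.
 */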
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);