// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either the lock or RCU protects the array of per-cgroup lists
	 * from relocation (see memcg_update_list_lru_node()).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

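/*
 * list_lru_add() derives the node from the item's address and, when the
 * lru is memcg aware, looks up the matching per-memcg list under
 * nlru->lock.  It returns true if the item was added and false if it was
 * already on a list.  When a list goes from empty to non-empty, the
 * shrinker bit for that memcg and node is set so the shrinker knows this
 * LRU has work.
 */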
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

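/*
 * list_lru_del() is the counterpart of list_lru_add(): under the same
 * per-node lock it removes the item and decrements both the per-list and
 * the per-node counters.  It returns true only if the item was actually
 * on a list.
 */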
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

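/*
 * The two isolate helpers below only touch the list and its item count;
 * they are intended for walk callbacks, which already run with the lru
 * lock held by the walker.
 */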
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

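/*
 * list_lru_count_one() reads a single per-node (and per-memcg) item count
 * under RCU only, while list_lru_count_node() returns the node-wide total
 * kept in nlru->nr_items.  Neither takes nlru->lock, so the result is a
 * snapshot that may already be stale when the caller looks at it.
 */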
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = READ_ONCE(l->nr_items);
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

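/*
 * __list_lru_walk_one() is the core walk loop.  It expects nlru->lock to
 * be held by the caller and invokes the isolate callback on each item
 * until *nr_to_walk is exhausted.  LRU_REMOVED_RETRY and LRU_RETRY mean
 * the callback dropped and re-took the lock, so the list traversal is no
 * longer valid and restarts from the head.
 */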
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

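/*
 * The two wrappers below differ only in how the per-node lock is taken:
 * list_lru_walk_one() uses spin_lock(), while list_lru_walk_one_irq()
 * uses spin_lock_irq() for callers that need the walk to run with
 * interrupts disabled.
 */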
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

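/*
 * list_lru_walk_node() walks everything on one node: first the node's own
 * list (NULL memcg) and then, if the lru is memcg aware, each per-memcg
 * list in turn, stopping as soon as *nr_to_walk is used up.
 */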
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

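/*
 * memcg_init_list_lru_node() allocates the per-memcg array for one node,
 * sized by memcg_nr_cache_ids, and publishes it via an RCU-protected
 * pointer so list_lru_from_memcg_idx() can look it up locklessly.
 */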
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(struct_size(memcg_lrus, lru, size), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered,
	 * and nobody can use it anymore.  So there is no need to use
	 * kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

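/*
 * memcg_update_list_lru_node() grows the per-memcg array when the memcg
 * id space grows: the new array is allocated and initialized, the old
 * entries are copied across, the pointer is switched under the IRQ-safe
 * node lock, and the old array is freed after an RCU grace period.
 */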
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(struct_size(new, lru, new_size), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, flex_array_size(new, lru, old_size));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	kvfree_rcu(old, rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

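/*
 * memcg_update_all_list_lrus() resizes every registered memcg-aware lru
 * under list_lrus_mutex.  If any lru fails, the ones already resized are
 * walked in reverse and their newly allocated entries are freed again.
 */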
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

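/*
 * memcg_drain_list_lru_node() splices all items of the src_idx list into
 * dst_memcg's list for the given node under the IRQ-safe node lock,
 * transfers the item count and, if anything moved, sets the destination
 * memcg's shrinker bit.
 */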
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

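/*
 * __list_lru_init() allocates one list_lru_node per possible node,
 * optionally attaches a lockdep class key, sets up the per-memcg arrays
 * when @memcg_aware is true and, with kmem accounting enabled, registers
 * the lru on the global list so later id-space resizes can find it.
 * memcg_get/put_cache_ids() keep the id space stable in between.
 */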
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

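/*
 * list_lru_destroy() tolerates an lru that was never initialized or was
 * already destroyed (lru->node is NULL in both cases); otherwise it
 * unregisters the lru and frees the per-node and per-memcg structures.
 */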
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);