// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"

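/*
 * With kmem accounting enabled, a memcg-aware list_lru has one
 * list_lru_one per (node, memcg) pair, stored in a per-node array that is
 * indexed by the memcg's kmem id and resized as new ids are allocated.
 */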
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

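/*
 * Non-memcg-aware lrus are never linked on memcg_list_lrus: only
 * memcg-aware ones take part in the array-resize and drain walks.
 */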
static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

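/*
 * Map a memcg kmem id to its list on @nlru. A negative idx selects the
 * node-global list, as does a not-yet-allocated per-memcg array.
 */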
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

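/*
 * Resolve the list an accounted object should live on from the object
 * pointer itself, optionally handing the owning memcg back to the caller.
 */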
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

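/*
 * An illustrative caller pattern, with hypothetical names (a
 * shrinker-backed cache embedding a list_head in its objects):
 *
 *	if (list_lru_add(&cache_lru, &obj->lru_entry))
 *		atomic_long_inc(&cache_nr_cached);
 *
 * The item must be off-list (list_empty()) for the add to take effect.
 */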
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = READ_ONCE(l->nr_items);
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

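/*
 * Walk one (node, memcg) list, calling @isolate on each item with
 * nlru->lock held by the caller. LRU_RETRY and LRU_REMOVED_RETRY mean the
 * callback dropped and re-acquired the lock, so traversal restarts from
 * the list head.
 */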
static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

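/*
 * Walk every list on @nid: first the node-global list, then each
 * per-memcg list while *nr_to_walk budget remains.
 */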
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(struct_size(memcg_lrus, lru, size), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

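/*
 * Grow nlru's per-memcg array from old_size to new_size entries. Lockless
 * readers may still be walking the old array, so the new one is populated
 * first, published with rcu_assign_pointer(), and the old copy is freed
 * only after a grace period via kvfree_rcu().
 */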
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(struct_size(new, lru, new_size), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, flex_array_size(new, lru, old_size));
	rcu_assign_pointer(nlru->memcg_lrus, new);
	kvfree_rcu(old, rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &memcg_list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

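/*
 * Splice everything on the @src_idx list over to @dst_memcg's list and
 * transfer the item counts, setting the destination's shrinker bit so the
 * moved items stay visible to reclaim (used when a memcg is drained).
 */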
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

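/*
 * Callers normally reach this through the list_lru_init*() wrappers in
 * <linux/list_lru.h>; an illustrative setup with placeholder names:
 *
 *	static struct list_lru my_lru;
 *	...
 *	err = list_lru_init_memcg(&my_lru, &my_shrinker);
 */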
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);