/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to always be present, even
	 * on systems supporting sparse NUMA ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either the lock or RCU protects the array of per-cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
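
/*
 * Example (illustrative sketch, not part of the original file): a cache
 * whose objects embed a list_head can publish idle objects on a list_lru
 * with list_lru_add() and pull them back off with list_lru_del() when they
 * are reused. "my_lru" and "my_object" are hypothetical names.
 *
 *	struct my_object {
 *		struct list_head lru;
 *	};
 *
 *	static struct list_lru my_lru;
 *
 *	static void my_object_set_idle(struct my_object *obj)
 *	{
 *		list_lru_add(&my_lru, &obj->lru);
 *	}
 *
 *	static void my_object_reuse(struct my_object *obj)
 *	{
 *		list_lru_del(&my_lru, &obj->lru);
 *	}
 */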

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
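
/*
 * Example walk callback (illustrative sketch): an isolate callback receives
 * the item, the per-node/per-memcg list it sits on, and the held nlru->lock,
 * and reports what it did via an lru_status code. "my_object" and
 * my_object_is_busy() are hypothetical.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct my_object *obj;
 *
 *		obj = container_of(item, struct my_object, lru);
 *		if (my_object_is_busy(obj))
 *			return LRU_SKIP;
 *
 *		list_lru_isolate(list, item);
 *		return LRU_REMOVED;
 *	}
 */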

static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
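
/*
 * Example (illustrative sketch): a memcg-aware shrinker typically implements
 * ->count_objects() via the list_lru_shrink_count() helper from
 * <linux/list_lru.h>, which resolves the node and memcg from the
 * shrink_control. "my_lru" is hypothetical.
 *
 *	static unsigned long my_count_objects(struct shrinker *shrink,
 *					      struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_lru, sc);
 *	}
 */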

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
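
/*
 * Example (illustrative sketch): the matching ->scan_objects() side of a
 * memcg-aware shrinker usually goes through list_lru_shrink_walk() from
 * <linux/list_lru.h>, which wraps list_lru_walk_one() with the nid, memcg
 * and scan budget taken from the shrink_control. "my_lru" and "my_isolate"
 * are hypothetical.
 *
 *	static unsigned long my_scan_objects(struct shrinker *shrink,
 *					     struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_walk(&my_lru, sc, my_isolate, NULL);
 *	}
 */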

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
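
/*
 * Example (illustrative sketch): walking every node, e.g. to dispose of all
 * items at unmount time, can loop over the nodes with an effectively
 * unbounded walk budget. "my_lru" and "my_dispose" are hypothetical.
 *
 *	unsigned long nr_to_walk = ULONG_MAX;
 *	int nid;
 *
 *	for_each_node_state(nid, N_NORMAL_MEMORY)
 *		list_lru_walk_node(&my_lru, nid, my_dispose,
 *				   NULL, &nr_to_walk);
 */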

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* Free only the entries initialized so far, i.e. [begin, i). */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
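
/*
 * Example (illustrative sketch): users normally initialize through the
 * wrappers in <linux/list_lru.h> rather than calling __list_lru_init()
 * directly, e.g. list_lru_init() or, for a memcg-aware LRU,
 * list_lru_init_memcg(), pairing either with list_lru_destroy() on
 * teardown. "my_lru" is hypothetical.
 *
 *	static struct list_lru my_lru;
 *
 *	static int __init my_init(void)
 *	{
 *		return list_lru_init_memcg(&my_lru);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		list_lru_destroy(&my_lru);
 *	}
 */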

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);