/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
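/*
 * All list_lrus are kept on this list so that the per-memcg arrays of every
 * memcg-aware lru can be resized (memcg_update_all_list_lrus) and drained
 * (memcg_drain_all_list_lrus) as kmem cache IDs come and go.
 */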
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to always be present, even on systems that
	 * support sparse NUMA IDs.
	 */
	return !!lru->node[0].memcg_lrus;
}

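/*
 * Return the per-memcg list for @idx, or the node-global list if @idx is
 * negative or the lru is not memcg aware.
 */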
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either the lock or RCU protects the array of per-cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

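/*
 * Look up the memcg that accounts a kmem-allocated object: the owning
 * memcg is recorded on the object's head page.
 */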
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

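/*
 * Find the list an object belongs on: the list of the memcg that owns
 * @ptr, or the node-global list if the lru is not memcg aware or the
 * object is not accounted to any memcg. If @memcg_ptr is not NULL, the
 * owning memcg (or NULL) is also reported through it.
 */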
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

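/*
 * list_lru_add - add an item to an lru list
 * @lru: the lru to add to
 * @item: the item; a list_head embedded in a slab-allocated object
 *
 * The item is added to the tail of the list selected by the object's NUMA
 * node and owning memcg. If the item is already on a list, nothing is done.
 *
 * Returns true if the item was added, false if it was already present.
 */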
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_add_tail(item, &l->list);
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

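/*
 * list_lru_del - remove an item from an lru list
 * @lru: the lru to remove from
 * @item: the item to remove
 *
 * Returns true if the item was removed, false if it was not on a list.
 */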
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

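/*
 * The two helpers below are meant to be called from a list_lru_walk_cb
 * callback, which runs with the node's lru lock already held: they unlink
 * (or move) an item and fix up the per-list count without taking the lock.
 */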
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

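/*
 * Count the items on one node's list for the given memcg. The count is
 * read under RCU only, so it may be stale by the time it is returned.
 */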
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

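/*
 * Walk one list, calling @isolate on each item until either the list is
 * exhausted or *nr_to_walk items have been visited. The node's lru lock
 * is held across the walk; a callback that drops it must return
 * LRU_REMOVED_RETRY or LRU_RETRY so the traversal restarts from scratch.
 */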
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * Decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items.
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
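/*
 * Each node of a memcg-aware lru carries an RCU-protected array of
 * list_lru_one, one slot per kmem cache ID. The array is grown in lockstep
 * with memcg_nr_cache_ids and is never shrunk.
 */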
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* Slots begin..i-1 were allocated, so the exclusive end must be i. */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when the shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

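/*
 * Grow one node's array to new_size slots: allocate a copy, initialize the
 * new slots, publish it with rcu_assign_pointer, and free the old array
 * after a grace period so that lockless readers never see a stale pointer.
 */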
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
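
/*
 * Drain: splice the lists of the source kmem cache ID onto the lists of
 * the destination memcg, so the remaining objects stay reclaimable.
 */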

static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

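/*
 * Set up a list_lru: one list_lru_node per possible NUMA node, plus the
 * per-memcg arrays if @memcg_aware. @key lets callers give the node locks
 * their own lockdep class; @shrinker, if any, supplies the shrinker ID
 * recorded for memcg-aware reclaim.
 */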
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);