/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>

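/*
 * With CONFIG_MEMCG_KMEM, every list_lru is kept on a global registry
 * protected by list_lrus_mutex; without it, the registration hooks
 * below compile away to empty stubs.
 */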
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
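
/*
 * Illustrative sketch, not part of the original file: a typical user
 * embeds a list_head in each cached object and calls list_lru_add()
 * once the object becomes reclaimable. All "example_*" names are
 * hypothetical.
 */
#if 0
struct example_obj {
	struct list_head lru;		/* linked into the per-node list */
};

static void example_make_reclaimable(struct list_lru *lru,
				     struct example_obj *obj)
{
	/* true only if the item was not already on an LRU list */
	if (!list_lru_add(lru, &obj->lru))
		pr_debug("object already on the LRU\n");
}
#endif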

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		nlru->nr_items--;
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
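
/*
 * Illustrative sketch: the converse operation, taking an object off the
 * LRU when it goes back into active use. Hypothetical names as above.
 */
#if 0
static void example_reactivate(struct list_lru *lru,
			       struct example_obj *obj)
{
	/* false means somebody else (e.g. a walker) already removed it */
	list_lru_del(lru, &obj->lru);
}
#endif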

unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
	unsigned long count = 0;
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	count += nlru->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
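
/*
 * Illustrative sketch: a whole-LRU count can be built by summing the
 * per-node counts; the list_lru_count() helper in <linux/list_lru.h>
 * works along these lines.
 */
#if 0
static unsigned long example_total_count(struct list_lru *lru)
{
	unsigned long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);
	return count;
}
#endif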

unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			nlru->nr_items--;
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
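
/*
 * Illustrative sketch: an isolate callback for list_lru_walk_node(). It
 * runs with nlru->lock held and decides each item's fate: busy objects
 * are skipped, idle ones are moved to a caller-private dispose list and
 * reported as removed (the walker then decrements nr_items).
 * "example_obj_busy" and the other example_* names are hypothetical.
 */
#if 0
static enum lru_status example_isolate(struct list_head *item,
				       spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;
	struct example_obj *obj = container_of(item, struct example_obj, lru);

	if (example_obj_busy(obj))
		return LRU_SKIP;		/* leave it in place */

	list_move(item, dispose);		/* unlink from the LRU */
	return LRU_REMOVED;
}
#endif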

int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	list_lru_register(lru);
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init_key);
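
/*
 * Illustrative sketch: most callers go through the list_lru_init()
 * wrapper in <linux/list_lru.h>, which passes a NULL key so the default
 * lockdep class is used. "example_lru" and "example_setup" are
 * hypothetical.
 */
#if 0
static struct list_lru example_lru;

static int __init example_setup(void)
{
	/* equivalent to list_lru_init_key(&example_lru, NULL) */
	return list_lru_init(&example_lru);
}
#endif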

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;
	list_lru_unregister(lru);
	kfree(lru->node);
	lru->node = NULL;
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
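
/*
 * Illustrative sketch: because list_lru_destroy() clears lru->node, the
 * call is idempotent, so teardown paths need not track whether the lru
 * was ever initialized (a zeroed struct is handled too).
 */
#if 0
static void example_teardown(void)
{
	list_lru_destroy(&example_lru);
	list_lru_destroy(&example_lru);	/* safe: second call is a no-op */
}
#endif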