// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};

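/*
 * node_read_cpumap() backs the per-node "cpumap" and "cpulist" sysfs files:
 * it reports the node's CPUs restricted to those currently online, either
 * as a hex mask or as a CPU list.
 */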
static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
	ssize_t n;
	cpumask_var_t mask;
	struct node *node_dev = to_node(dev);

	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_to_pagebuf(list, buf, mask);
	free_cpumask_var(mask);

	return n;
}

static inline ssize_t node_read_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, false, buf);
}
static inline ssize_t node_read_cpulist(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, true, buf);
}

static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);

/**
 * struct node_access_nodes - Access class device to hold user visible
 *			      relationships to other nodes.
 * @dev:	Device for this memory access class
 * @list_node:	List element in the node's access list
 * @access:	The access class rank
 * @hmem_attrs:	Heterogeneous memory performance attributes
 */
struct node_access_nodes {
	struct device		dev;
	struct list_head	list_node;
	unsigned		access;
#ifdef CONFIG_HMEM_REPORTING
	struct node_hmem_attrs	hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name = "initiators",
	.attrs = node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name = "targets",
	.attrs = node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

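/* Tear down all access class devices that were registered for this node. */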
static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}

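/*
 * Look up the access class device for @node, creating and registering it
 * (e.g. as .../nodeX/access0 in sysfs) if this class has not been seen
 * before.
 */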
static struct node_access_nodes *node_init_node_access(struct node *node,
							unsigned access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}

#ifdef CONFIG_HMEM_REPORTING
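/*
 * Generate one read-only sysfs attribute per node_hmem_attrs field; each
 * file is added to the access class "initiators" group when
 * node_set_perf_attrs() fills in the values.
 */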
#define ACCESS_ATTR(name)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sprintf(buf, "%u\n", to_access_nodes(dev)->hmem_attrs.name); \
}									\
static DEVICE_ATTR_RO(name);

ACCESS_ATTR(read_bandwidth)
ACCESS_ATTR(read_latency)
ACCESS_ATTR(write_bandwidth)
ACCESS_ATTR(write_latency)

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @hmem_attrs: Heterogeneous memory performance attributes
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->hmem_attrs = *hmem_attrs;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}

/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev:	Device representing the cache level
 * @node:	List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

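/*
 * Generate a read-only show function for each member of struct
 * node_cache_attrs exported under a memory_side_cache/indexN device.
 */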
#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sprintf(buf, fmt "\n", to_cache_info(dev)->cache_attrs.name);\
}									\
DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);
	kfree(info);
}

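/*
 * Create the "memory_side_cache" parent device under the node; individual
 * cache levels are added beneath it as "index%d" children by
 * node_add_cache().
 */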
static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto free_dev;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
free_name:
	kfree_const(dev->kobj.name);
free_dev:
	kfree(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				"attempt to add duplicate cache level:%d\n",
				cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto free_cache;

	info->cache_attrs = *cache_attrs;
	if (device_register(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			 cache_attrs->level);
		goto free_name;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
free_name:
	kfree_const(dev->kobj.name);
free_cache:
	kfree(info);
}

static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

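/* K(x): convert a page count to kB (assumes PAGE_SHIFT >= 10). */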
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int n;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state(pgdat, NR_SLAB_RECLAIMABLE);
	sunreclaimable = node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE);
	n = sprintf(buf,
		       "Node %d MemTotal:       %8lu kB\n"
		       "Node %d MemFree:        %8lu kB\n"
		       "Node %d MemUsed:        %8lu kB\n"
		       "Node %d Active:         %8lu kB\n"
		       "Node %d Inactive:       %8lu kB\n"
		       "Node %d Active(anon):   %8lu kB\n"
		       "Node %d Inactive(anon): %8lu kB\n"
		       "Node %d Active(file):   %8lu kB\n"
		       "Node %d Inactive(file): %8lu kB\n"
		       "Node %d Unevictable:    %8lu kB\n"
		       "Node %d Mlocked:        %8lu kB\n",
		       nid, K(i.totalram),
		       nid, K(i.freeram),
		       nid, K(i.totalram - i.freeram),
		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				node_page_state(pgdat, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				node_page_state(pgdat, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
		       nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
		       nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	n += sprintf(buf + n,
		       "Node %d HighTotal:      %8lu kB\n"
		       "Node %d HighFree:       %8lu kB\n"
		       "Node %d LowTotal:       %8lu kB\n"
		       "Node %d LowFree:        %8lu kB\n",
		       nid, K(i.totalhigh),
		       nid, K(i.freehigh),
		       nid, K(i.totalram - i.totalhigh),
		       nid, K(i.freeram - i.freehigh));
#endif
	n += sprintf(buf + n,
		       "Node %d Dirty:          %8lu kB\n"
		       "Node %d Writeback:      %8lu kB\n"
		       "Node %d FilePages:      %8lu kB\n"
		       "Node %d Mapped:         %8lu kB\n"
		       "Node %d AnonPages:      %8lu kB\n"
		       "Node %d Shmem:          %8lu kB\n"
		       "Node %d KernelStack:    %8lu kB\n"
		       "Node %d PageTables:     %8lu kB\n"
		       "Node %d NFS_Unstable:   %8lu kB\n"
		       "Node %d Bounce:         %8lu kB\n"
		       "Node %d WritebackTmp:   %8lu kB\n"
		       "Node %d KReclaimable:   %8lu kB\n"
		       "Node %d Slab:           %8lu kB\n"
		       "Node %d SReclaimable:   %8lu kB\n"
		       "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       "Node %d AnonHugePages:  %8lu kB\n"
		       "Node %d ShmemHugePages: %8lu kB\n"
		       "Node %d ShmemPmdMapped: %8lu kB\n"
#endif
			,
		       nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
		       nid, K(node_page_state(pgdat, NR_WRITEBACK)),
		       nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
		       nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
		       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
		       nid, K(i.sharedram),
		       nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
		       nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
		       nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
		       nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
		       nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
		       nid, K(sreclaimable +
			      node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
		       nid, K(sreclaimable + sunreclaimable),
		       nid, K(sreclaimable),
		       nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       ,
		       nid, K(node_page_state(pgdat, NR_ANON_THPS) *
			      HPAGE_PMD_NR),
		       nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
			      HPAGE_PMD_NR),
		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
			      HPAGE_PMD_NR)
#endif
		       );
	n += hugetlb_report_node_meminfo(nid, buf + n);
	return n;
}

#undef K
static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);

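/* Per-node NUMA hit/miss allocation counters, summed over the node's zones. */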
static ssize_t node_read_numastat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf,
		       "numa_hit %lu\n"
		       "numa_miss %lu\n"
		       "numa_foreign %lu\n"
		       "interleave_hit %lu\n"
		       "local_node %lu\n"
		       "other_node %lu\n",
		       sum_zone_numa_state(dev->id, NUMA_HIT),
		       sum_zone_numa_state(dev->id, NUMA_MISS),
		       sum_zone_numa_state(dev->id, NUMA_FOREIGN),
		       sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
		       sum_zone_numa_state(dev->id, NUMA_LOCAL),
		       sum_zone_numa_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int n = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
			     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n",
			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
			     sum_zone_numa_state(nid, i));
#endif

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n",
			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
			     NR_VM_NUMA_STAT_ITEMS],
			     node_page_state(pgdat, i));

	return n;
}
static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);

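/* Distance from this node to every online node, in node id order. */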
static ssize_t node_read_distance(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i)
		len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));

	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);

static struct attribute *node_dev_attrs[] = {
	&dev_attr_cpumap.attr,
	&dev_attr_cpulist.attr,
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};
ATTRIBUTE_GROUPS(node_dev);

#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs per node attributes registration interface:
 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 * it will register its per node attributes for all online nodes with
 * memory.  It will also call register_hugetlbfs_with_node(), below, to
 * register its attribute registration functions with this node driver.
 * Once these hooks have been initialized, the node driver will call into
 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 */
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;

static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
			node_state(node->dev.id, N_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}

static inline void hugetlb_unregister_node(struct node *node)
{
	if (__hugetlb_unregister_node)
		__hugetlb_unregister_node(node);
}

void register_hugetlbfs_with_node(node_registration_func_t doregister,
				  node_registration_func_t unregister)
{
	__hugetlb_register_node = doregister;
	__hugetlb_unregister_node = unregister;
}
#else
static inline void hugetlb_register_node(struct node *node) {}

static inline void hugetlb_unregister_node(struct node *node) {}
#endif

static void node_device_release(struct device *dev)
{
	struct node *node = to_node(dev);

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
	/*
	 * We schedule the work only when a memory section is
	 * onlined/offlined on this node. When we come here,
	 * all the memory on this node has been offlined,
	 * so we won't enqueue new work to this work.
	 *
	 * The work is using node->node_work, so we should
	 * flush work before freeing the memory.
	 */
	flush_work(&node->node_work);
#endif
	kfree(node);
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;
	error = device_register(&node->dev);

	if (error)
		put_device(&node->dev);
	else {
		hugetlb_register_node(node);

		compaction_register_node(node);
	}
	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}

/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					     node for a given access class.
 * @mem_nid:	Memory node number
 * @cpu_nid:	Cpu node number
 * @access:	Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    unsigned access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
 err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}

int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
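/*
 * Resolve the node id owning @pfn.  Before the system is fully up, deferred
 * struct page init may not have run yet, so fall back to early_pfn_to_nid().
 */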
static int __ref get_nid_for_pfn(unsigned long pfn)
{
	if (!pfn_valid_within(pfn))
		return -1;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}

/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
{
	int ret, nid = *(int *)arg;
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	mem_blk->nid = nid;

	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
	sect_end_pfn += PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int page_nid;

		/*
		 * memory block could have several absent sections from start.
		 * skip pfn range from absent section
		 */
		if (!pfn_present(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We need to check if page belongs to nid only for the boot
		 * case, during hotplug we know that all pages in the memory
		 * block belong to the same node.
		 */
		if (system_state == SYSTEM_BOOTING) {
			page_nid = get_nid_for_pfn(pfn);
			if (page_nid < 0)
				continue;
			if (page_nid != nid)
				continue;
		}
		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
					&mem_blk->dev.kobj,
					kobject_name(&mem_blk->dev.kobj));
		if (ret)
			return ret;

		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				&node_devices[nid]->dev.kobj,
				kobject_name(&node_devices[nid]->dev.kobj));
	}
	/* mem section does not span the specified node */
	return 0;
}

/* unregister memory section under all nodes that it spans */
int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
				    unsigned long phys_index)
{
	NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	if (!mem_blk) {
		NODEMASK_FREE(unlinked_nodes);
		return -EFAULT;
	}
	if (!unlinked_nodes)
		return -ENOMEM;
	nodes_clear(*unlinked_nodes);

	sect_start_pfn = section_nr_to_pfn(phys_index);
	sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int nid;

		nid = get_nid_for_pfn(pfn);
		if (nid < 0)
			continue;
		if (!node_online(nid))
			continue;
		if (node_test_and_set(nid, *unlinked_nodes))
			continue;
		sysfs_remove_link(&node_devices[nid]->dev.kobj,
			 kobject_name(&mem_blk->dev.kobj));
		sysfs_remove_link(&mem_blk->dev.kobj,
			 kobject_name(&node_devices[nid]->dev.kobj));
	}
	NODEMASK_FREE(unlinked_nodes);
	return 0;
}

int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
{
	return walk_memory_range(start_pfn, end_pfn, (void *)&nid,
					register_mem_sect_under_node);
}

#ifdef CONFIG_HUGETLBFS
/*
 * Handle per node hstate attribute [un]registration on transitions
 * to/from memoryless state.
 */
static void node_hugetlb_work(struct work_struct *work)
{
	struct node *node = container_of(work, struct node, node_work);

	/*
	 * We only get here when a node transitions to/from memoryless state.
	 * We can detect which transition occurred by examining whether the
	 * node has memory now.  hugetlb_register_node() already checks this
	 * so we try to register the attributes.  If that fails, then the
	 * node has transitioned to memoryless, try to unregister the
	 * attributes.
	 */
	if (!hugetlb_register_node(node))
		hugetlb_unregister_node(node);
}

static void init_node_hugetlb_work(int nid)
{
	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
}

static int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;

	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/*
		 * offload per node hstate [un]registration to a work thread
		 * when transitioning to/from memoryless state.
		 */
		if (nid != NUMA_NO_NODE)
			schedule_work(&node_devices[nid]->node_work);
		break;

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}
#endif	/* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
	!defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	return NOTIFY_OK;
}

static void init_node_hugetlb_work(int nid) { }

#endif

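/*
 * Allocate and register the device for node @nid, link its present CPUs
 * and initialize the hugetlb work item and memory-side cache list.
 */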
int __register_one_node(int nid)
{
	int error;
	int cpu;

	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node_devices[nid])
		return -ENOMEM;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	INIT_LIST_HEAD(&node_devices[nid]->access_list);
	/* initialize work queue for memory hot plug */
	init_node_hugetlb_work(nid);
	node_init_caches(nid);

	return error;
}

void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

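/*
 * These back the nodemask files at the subsystem root, e.g.
 * /sys/devices/system/node/{possible,online,has_memory,has_cpu}.
 */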
static ssize_t print_nodes_state(enum node_states state, char *buf)
{
	int n;

	n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
		      nodemask_pr_args(&node_states[state]));
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);
	return print_nodes_state(na->state, buf);
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
};

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
static int __init register_node_type(void)
{
	int ret;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (!ret) {
		static struct notifier_block node_memory_callback_nb = {
			.notifier_call = node_memory_callback,
			.priority = NODE_CALLBACK_PRI,
		};
		register_hotmemory_notifier(&node_memory_callback_nb);
	}

	/*
	 * Note: we're not going to unregister the node class if we fail
	 * to register the node state class attribute files.
	 */
	return ret;
}
postcore_initcall(register_node_type);