// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};

static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
				  struct bin_attribute *attr, char *buf,
				  loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpumap, 0);

static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_list_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpulist, 0);
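
/*
 * Usage sketch: both attributes read back through the node's sysfs
 * directory, e.g. (illustrative values, assuming node0 owns CPUs 0-3):
 *
 *	$ cat /sys/devices/system/node/node0/cpulist
 *	0-3
 *
 * cpumap emits the same set as a hex bitmask.
 */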

/**
 * struct node_access_nodes - Access class device to hold user visible
 *			      relationships to other nodes.
 * @dev:	Device for this memory access class
 * @list_node:	List element in the node's access list
 * @access:	The access class rank
 * @hmem_attrs: Heterogeneous memory performance attributes
 */
struct node_access_nodes {
	struct device		dev;
	struct list_head	list_node;
	unsigned int		access;
#ifdef CONFIG_HMEM_REPORTING
	struct node_hmem_attrs	hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
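
/*
 * Each access class becomes a child device of its node, e.g.
 * /sys/devices/system/node/nodeX/access0/, whose "initiators" and
 * "targets" groups below hold links to the related nodes.
 */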

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name	= "initiators",
	.attrs	= node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name	= "targets",
	.attrs	= node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}

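/*
 * Look up the access class device for @access on @node, creating and
 * registering it on first use.  Returns NULL if the allocation or the
 * device registration fails.
 */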
static struct node_access_nodes *node_init_node_access(struct node *node,
						       unsigned int access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}

#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(name)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, "%u\n",					\
			  to_access_nodes(dev)->hmem_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name)

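/*
 * For example, ACCESS_ATTR(read_bandwidth) expands to a
 * read_bandwidth_show() that emits
 * to_access_nodes(dev)->hmem_attrs.read_bandwidth, plus the matching
 * read-only dev_attr_read_bandwidth.
 */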
ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
ACCESS_ATTR(write_bandwidth);
ACCESS_ATTR(write_latency);

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @hmem_attrs: Heterogeneous memory performance attributes
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned int access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->hmem_attrs = *hmem_attrs;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}
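
/*
 * Sketch of the result: after node_set_perf_attrs(1, &attrs, 0), the
 * values read back from files such as
 * /sys/devices/system/node/node1/access0/initiators/read_bandwidth.
 */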

/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev:	Device representing the cache level
 * @node:	List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)
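
/*
 * Cache levels appear below their node as
 * /sys/devices/system/node/nodeX/memory_side_cache/indexN/, one "indexN"
 * directory per cache level N.
 */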

#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, fmt "\n",				\
			  to_cache_info(dev)->cache_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);
	kfree(info);
}

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	device_initialize(dev);
	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto put_device;

	if (device_add(dev))
		goto put_device;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
put_device:
	put_device(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				"attempt to add duplicate cache level:%d\n",
				cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	device_initialize(dev);
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto put_device;

	info->cache_attrs = *cache_attrs;
	if (device_add(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			cache_attrs->level);
		goto put_device;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
put_device:
	put_device(dev);
}
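
/*
 * Sketch of the result: node_add_cache(0, &attrs) with attrs.level == 1
 * creates /sys/devices/system/node/node0/memory_side_cache/index1/ with
 * size, line_size, indexing and write_policy attributes.
 */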

static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

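/* Convert a page count to kilobytes; assumes PAGE_SHIFT >= 10. */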
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int len = 0;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;
	unsigned long swapcached = 0;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
#ifdef CONFIG_SWAP
	swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
#endif
	len = sysfs_emit_at(buf, len,
			    "Node %d MemTotal:       %8lu kB\n"
			    "Node %d MemFree:        %8lu kB\n"
			    "Node %d MemUsed:        %8lu kB\n"
			    "Node %d SwapCached:     %8lu kB\n"
			    "Node %d Active:         %8lu kB\n"
			    "Node %d Inactive:       %8lu kB\n"
			    "Node %d Active(anon):   %8lu kB\n"
			    "Node %d Inactive(anon): %8lu kB\n"
			    "Node %d Active(file):   %8lu kB\n"
			    "Node %d Inactive(file): %8lu kB\n"
			    "Node %d Unevictable:    %8lu kB\n"
			    "Node %d Mlocked:        %8lu kB\n",
			    nid, K(i.totalram),
			    nid, K(i.freeram),
			    nid, K(i.totalram - i.freeram),
			    nid, K(swapcached),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				   node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				   node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	len += sysfs_emit_at(buf, len,
			     "Node %d HighTotal:      %8lu kB\n"
			     "Node %d HighFree:       %8lu kB\n"
			     "Node %d LowTotal:       %8lu kB\n"
			     "Node %d LowFree:        %8lu kB\n",
			     nid, K(i.totalhigh),
			     nid, K(i.freehigh),
			     nid, K(i.totalram - i.totalhigh),
			     nid, K(i.freeram - i.freehigh));
#endif
	len += sysfs_emit_at(buf, len,
			     "Node %d Dirty:          %8lu kB\n"
			     "Node %d Writeback:      %8lu kB\n"
			     "Node %d FilePages:      %8lu kB\n"
			     "Node %d Mapped:         %8lu kB\n"
			     "Node %d AnonPages:      %8lu kB\n"
			     "Node %d Shmem:          %8lu kB\n"
			     "Node %d KernelStack:    %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
			     "Node %d ShadowCallStack:%8lu kB\n"
#endif
			     "Node %d PageTables:     %8lu kB\n"
			     "Node %d NFS_Unstable:   %8lu kB\n"
			     "Node %d Bounce:         %8lu kB\n"
			     "Node %d WritebackTmp:   %8lu kB\n"
			     "Node %d KReclaimable:   %8lu kB\n"
			     "Node %d Slab:           %8lu kB\n"
			     "Node %d SReclaimable:   %8lu kB\n"
			     "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     "Node %d AnonHugePages:  %8lu kB\n"
			     "Node %d ShmemHugePages: %8lu kB\n"
			     "Node %d ShmemPmdMapped: %8lu kB\n"
			     "Node %d FileHugePages:  %8lu kB\n"
			     "Node %d FilePmdMapped:  %8lu kB\n"
#endif
			     ,
			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
			     nid, K(i.sharedram),
			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
			     nid, 0UL,
			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			     nid, K(sreclaimable +
				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
			     nid, K(sreclaimable + sunreclaimable),
			     nid, K(sreclaimable),
			     nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     ,
			     nid, K(node_page_state(pgdat, NR_ANON_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			     nid, K(node_page_state(pgdat, NR_FILE_THPS)),
			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
			    );
	len += hugetlb_report_node_meminfo(buf, len, nid);
	return len;
}

#undef K
static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);

static ssize_t node_read_numastat(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	fold_vm_numa_events();
	return sysfs_emit(buf,
			  "numa_hit %lu\n"
			  "numa_miss %lu\n"
			  "numa_foreign %lu\n"
			  "interleave_hit %lu\n"
			  "local_node %lu\n"
			  "other_node %lu\n",
			  sum_zone_numa_event_state(dev->id, NUMA_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_MISS),
			  sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
			  sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
			  sum_zone_numa_event_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int len = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     zone_stat_name(i),
				     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	fold_vm_numa_events();
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     numa_stat_name(i),
				     sum_zone_numa_event_state(nid, i));

#endif
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		unsigned long pages = node_page_state_pages(pgdat, i);

		if (vmstat_item_print_in_thp(i))
			pages /= HPAGE_PMD_NR;
		len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
				     pages);
	}

	return len;
}
static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i) {
		len += sysfs_emit_at(buf, len, "%s%d",
				     i ? " " : "", node_distance(nid, i));
	}

	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
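
/*
 * The distance attribute reads back as one line of distances from this
 * node to each online node, e.g. "10 21" on a two-node machine
 * (illustrative values).
 */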

static struct attribute *node_dev_attrs[] = {
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};

static struct bin_attribute *node_dev_bin_attrs[] = {
	&bin_attr_cpumap,
	&bin_attr_cpulist,
	NULL
};

static const struct attribute_group node_dev_group = {
	.attrs = node_dev_attrs,
	.bin_attrs = node_dev_bin_attrs
};

static const struct attribute_group *node_dev_groups[] = {
	&node_dev_group,
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
	&arch_node_dev_group,
#endif
	NULL
};

#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs per node attributes registration interface:
 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 * it will register its per node attributes for all online nodes with
 * memory.  It will also call register_hugetlbfs_with_node(), below, to
 * register its attribute registration functions with this node driver.
 * Once these hooks have been initialized, the node driver will call into
 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 */
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;

static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
			node_state(node->dev.id, N_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}

static inline void hugetlb_unregister_node(struct node *node)
{
	if (__hugetlb_unregister_node)
		__hugetlb_unregister_node(node);
}

void register_hugetlbfs_with_node(node_registration_func_t doregister,
				  node_registration_func_t unregister)
{
	__hugetlb_register_node   = doregister;
	__hugetlb_unregister_node = unregister;
}
#else
static inline void hugetlb_register_node(struct node *node) {}

static inline void hugetlb_unregister_node(struct node *node) {}
#endif

static void node_device_release(struct device *dev)
{
	struct node *node = to_node(dev);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
	/*
	 * We schedule the work only when a memory section is
	 * onlined/offlined on this node. When we come here,
	 * all the memory on this node has been offlined,
	 * so we won't enqueue new work to this work.
	 *
	 * The work is using node->node_work, so we should
	 * flush work before freeing the memory.
	 */
	flush_work(&node->node_work);
#endif
	kfree(node);
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;
	error = device_register(&node->dev);

	if (error)
		put_device(&node->dev);
	else {
		hugetlb_register_node(node);

		compaction_register_node(node);
	}
	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}
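
/*
 * The two links above are symmetric: after register_cpu_under_node(1, 0),
 * for example, node0's directory gains a "cpu1" link and cpu1's directory
 * gains a "node0" link.
 */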

/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					     node for a given access class.
 * @mem_nid:	Memory node number
 * @cpu_nid:	Cpu node number
 * @access:	Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    unsigned int access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}
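
/*
 * Sketch of the result: register_memory_node_under_compute_node(1, 0, 0)
 * links node1 under node0's access0/targets/ and node0 under node1's
 * access0/initiators/.
 */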

int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int __ref get_nid_for_pfn(unsigned long pfn)
{
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}

static void do_register_memory_block_under_node(int nid,
						struct memory_block *mem_blk)
{
	int ret;

	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node.
	 */
	mem_blk->nid = nid;

	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
				       &mem_blk->dev.kobj,
				       kobject_name(&mem_blk->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&node_devices[nid]->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&mem_blk->dev.kobj), ret);

	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				       &node_devices[nid]->dev.kobj,
				       kobject_name(&node_devices[nid]->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&mem_blk->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&node_devices[nid]->dev.kobj),
				    ret);
}

/* register memory section under specified node if it spans that node */
static int register_mem_block_under_node_early(struct memory_block *mem_blk,
					       void *arg)
{
	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
	int nid = *(int *)arg;
	unsigned long pfn;

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		int page_nid;

		/*
		 * memory block could have several absent sections from start.
		 * skip pfn range from absent section
		 */
		if (!pfn_in_present_section(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We need to check if page belongs to nid only at the boot
		 * case because node's ranges can be interleaved.
		 */
		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;

		do_register_memory_block_under_node(nid, mem_blk);
		return 0;
	}
	/* mem section does not span the specified node */
	return 0;
}

/*
 * During hotplug we know that all pages in the memory block belong to the same
 * node.
 */
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
						 void *arg)
{
	int nid = *(int *)arg;

	do_register_memory_block_under_node(nid, mem_blk);
	return 0;
}

/*
 * Unregister a memory block device under the node it spans. Memory blocks
 * with multiple nodes cannot be offlined and therefore also never be removed.
 */
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
	if (mem_blk->nid == NUMA_NO_NODE)
		return;

	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
			  kobject_name(&mem_blk->dev.kobj));
	sysfs_remove_link(&mem_blk->dev.kobj,
			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}

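/*
 * Register all memory blocks in [start_pfn, end_pfn) under node @nid,
 * taking the cheap hotplug path when the caller already knows the whole
 * range belongs to a single node.
 */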
void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
		       enum meminit_context context)
{
	walk_memory_blocks_func_t func;

	if (context == MEMINIT_HOTPLUG)
		func = register_mem_block_under_node_hotplug;
	else
		func = register_mem_block_under_node_early;

	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
			   (void *)&nid, func);
}

#ifdef CONFIG_HUGETLBFS
/*
 * Handle per node hstate attribute [un]registration on transitions
 * to/from memoryless state.
 */
static void node_hugetlb_work(struct work_struct *work)
{
	struct node *node = container_of(work, struct node, node_work);

	/*
	 * We only get here when a node transitions to/from memoryless state.
	 * We can detect which transition occurred by examining whether the
	 * node has memory now.  hugetlb_register_node() already checks this
	 * so we try to register the attributes.  If that fails, then the
	 * node has transitioned to memoryless, try to unregister the
	 * attributes.
	 */
	if (!hugetlb_register_node(node))
		hugetlb_unregister_node(node);
}

static void init_node_hugetlb_work(int nid)
{
	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
}

static int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;

	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/*
		 * offload per node hstate [un]registration to a work thread
		 * when transitioning to/from memoryless state.
		 */
		if (nid != NUMA_NO_NODE)
			schedule_work(&node_devices[nid]->node_work);
		break;

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}
#endif	/* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG */

#if !defined(CONFIG_MEMORY_HOTPLUG) || !defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	return NOTIFY_OK;
}

static void init_node_hugetlb_work(int nid) { }

#endif

int __register_one_node(int nid)
{
	int error;
	int cpu;

	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node_devices[nid])
		return -ENOMEM;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	INIT_LIST_HEAD(&node_devices[nid]->access_list);
	/* initialize work queue for memory hot plug */
	init_node_hugetlb_work(nid);
	node_init_caches(nid);

	return error;
}

void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);

	return sysfs_emit(buf, "%*pbl\n",
			  nodemask_pr_args(&node_states[na->state]));
}

#define _NODE_ATTR(name, state)					\
	{ __ATTR(name, 0444, show_node_state, NULL), state }

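/*
 * These masks surface directly under /sys/devices/system/node/ as, e.g.,
 * "possible", "online", "has_memory" and "has_cpu".
 */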
static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
					   N_GENERIC_INITIATOR),
};

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
static int __init register_node_type(void)
{
	int ret;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (!ret) {
		static struct notifier_block node_memory_callback_nb = {
			.notifier_call = node_memory_callback,
			.priority = NODE_CALLBACK_PRI,
		};
		register_hotmemory_notifier(&node_memory_callback_nb);
	}

	/*
	 * Note: we're not going to unregister the node class if we fail
	 * to register the node state class attribute files.
	 */
	return ret;
}
postcore_initcall(register_node_type);