// SPDX-License-Identifier: GPL-2.0
/*
 * Basic Node interface support
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>

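/*
 * The "node" subsystem is registered with subsys_system_register() at the
 * bottom of this file, which makes each NUMA node appear as
 * /sys/devices/system/node/nodeN.
 */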
static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};

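/*
 * cpumap and cpulist are exposed as binary attributes (registered with
 * size 0, i.e. unlimited) so that very large CPU masks can be read in
 * chunks via the off/count arguments instead of being limited to a single
 * PAGE_SIZE sysfs buffer.
 */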
static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
				  struct bin_attribute *attr, char *buf,
				  loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpumap, 0);

static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct node *node_dev = to_node(dev);
	cpumask_var_t mask;
	ssize_t n;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_list_to_buf(buf, mask, off, count);
	free_cpumask_var(mask);

	return n;
}

static BIN_ATTR_RO(cpulist, 0);

/**
 * struct node_access_nodes - Access class device to hold user visible
 *			      relationships to other nodes.
 * @dev:	Device for this memory access class
 * @list_node:	List element in the node's access list
 * @access:	The access class rank
 * @hmem_attrs: Heterogeneous memory performance attributes
 */
struct node_access_nodes {
	struct device		dev;
	struct list_head	list_node;
	unsigned int		access;
#ifdef CONFIG_HMEM_REPORTING
	struct node_hmem_attrs	hmem_attrs;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)

static struct attribute *node_init_access_node_attrs[] = {
	NULL,
};

static struct attribute *node_targ_access_node_attrs[] = {
	NULL,
};

static const struct attribute_group initiators = {
	.name = "initiators",
	.attrs = node_init_access_node_attrs,
};

static const struct attribute_group targets = {
	.name = "targets",
	.attrs = node_targ_access_node_attrs,
};

static const struct attribute_group *node_access_node_groups[] = {
	&initiators,
	&targets,
	NULL,
};

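/*
 * Each access class appears in sysfs as
 * /sys/devices/system/node/nodeX/accessY/, with "initiators" and "targets"
 * subdirectories that link a memory target to its initiator nodes and
 * vice versa.
 */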
static void node_remove_accesses(struct node *node)
{
	struct node_access_nodes *c, *cnext;

	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
		list_del(&c->list_node);
		device_unregister(&c->dev);
	}
}

static void node_access_release(struct device *dev)
{
	kfree(to_access_nodes(dev));
}

static struct node_access_nodes *node_init_node_access(struct node *node,
						       unsigned int access)
{
	struct node_access_nodes *access_node;
	struct device *dev;

	list_for_each_entry(access_node, &node->access_list, list_node)
		if (access_node->access == access)
			return access_node;

	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
	if (!access_node)
		return NULL;

	access_node->access = access;
	dev = &access_node->dev;
	dev->parent = &node->dev;
	dev->release = node_access_release;
	dev->groups = node_access_node_groups;
	if (dev_set_name(dev, "access%u", access))
		goto free;

	if (device_register(dev))
		goto free_name;

	pm_runtime_no_callbacks(dev);
	list_add_tail(&access_node->list_node, &node->access_list);
	return access_node;
free_name:
	kfree_const(dev->kobj.name);
free:
	kfree(access_node);
	return NULL;
}

#ifdef CONFIG_HMEM_REPORTING
#define ACCESS_ATTR(name)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, "%u\n",					\
			  to_access_nodes(dev)->hmem_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name)

ACCESS_ATTR(read_bandwidth);
ACCESS_ATTR(read_latency);
ACCESS_ATTR(write_bandwidth);
ACCESS_ATTR(write_latency);

static struct attribute *access_attrs[] = {
	&dev_attr_read_bandwidth.attr,
	&dev_attr_read_latency.attr,
	&dev_attr_write_bandwidth.attr,
	&dev_attr_write_latency.attr,
	NULL,
};

/**
 * node_set_perf_attrs - Set the performance values for given access class
 * @nid: Node identifier to be set
 * @hmem_attrs: Heterogeneous memory performance attributes
 * @access: The access class for the given attributes
 */
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned int access)
{
	struct node_access_nodes *c;
	struct node *node;
	int i;

	if (WARN_ON_ONCE(!node_online(nid)))
		return;

	node = node_devices[nid];
	c = node_init_node_access(node, access);
	if (!c)
		return;

	c->hmem_attrs = *hmem_attrs;
	for (i = 0; access_attrs[i] != NULL; i++) {
		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
					    "initiators")) {
			pr_info("failed to add performance attribute to node %d\n",
				nid);
			break;
		}
	}
}
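/*
 * With CONFIG_HMEM_REPORTING, the values set above appear as read-only
 * files such as
 * /sys/devices/system/node/nodeX/accessY/initiators/read_bandwidth
 * (plus read_latency, write_bandwidth and write_latency).
 */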

/**
 * struct node_cache_info - Internal tracking for memory node caches
 * @dev:	Device representing the cache level
 * @node:	List element for tracking in the node
 * @cache_attrs: Attributes for this cache level
 */
struct node_cache_info {
	struct device dev;
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};
#define to_cache_info(device) container_of(device, struct node_cache_info, dev)

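/*
 * Memory-side cache attributes are exposed under
 * /sys/devices/system/node/nodeX/memory_side_cache/indexN/, one "indexN"
 * directory per cache level, populated by node_add_cache() below.
 */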
#define CACHE_ATTR(name, fmt)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	return sysfs_emit(buf, fmt "\n",				\
			  to_cache_info(dev)->cache_attrs.name);	\
}									\
static DEVICE_ATTR_RO(name);

CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")

static struct attribute *cache_attrs[] = {
	&dev_attr_indexing.attr,
	&dev_attr_size.attr,
	&dev_attr_line_size.attr,
	&dev_attr_write_policy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache);

static void node_cache_release(struct device *dev)
{
	kfree(dev);
}

static void node_cacheinfo_release(struct device *dev)
{
	struct node_cache_info *info = to_cache_info(dev);
	kfree(info);
}

static void node_init_cache_dev(struct node *node)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return;

	device_initialize(dev);
	dev->parent = &node->dev;
	dev->release = node_cache_release;
	if (dev_set_name(dev, "memory_side_cache"))
		goto put_device;

	if (device_add(dev))
		goto put_device;

	pm_runtime_no_callbacks(dev);
	node->cache_dev = dev;
	return;
put_device:
	put_device(dev);
}

/**
 * node_add_cache() - add cache attribute to a memory node
 * @nid: Node identifier that has new cache attributes
 * @cache_attrs: Attributes for the cache being added
 */
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
{
	struct node_cache_info *info;
	struct device *dev;
	struct node *node;

	if (!node_online(nid) || !node_devices[nid])
		return;

	node = node_devices[nid];
	list_for_each_entry(info, &node->cache_attrs, node) {
		if (info->cache_attrs.level == cache_attrs->level) {
			dev_warn(&node->dev,
				 "attempt to add duplicate cache level:%d\n",
				 cache_attrs->level);
			return;
		}
	}

	if (!node->cache_dev)
		node_init_cache_dev(node);
	if (!node->cache_dev)
		return;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dev = &info->dev;
	device_initialize(dev);
	dev->parent = node->cache_dev;
	dev->release = node_cacheinfo_release;
	dev->groups = cache_groups;
	if (dev_set_name(dev, "index%d", cache_attrs->level))
		goto put_device;

	info->cache_attrs = *cache_attrs;
	if (device_add(dev)) {
		dev_warn(&node->dev, "failed to add cache level:%d\n",
			 cache_attrs->level);
		goto put_device;
	}
	pm_runtime_no_callbacks(dev);
	list_add_tail(&info->node, &node->cache_attrs);
	return;
put_device:
	put_device(dev);
}

static void node_remove_caches(struct node *node)
{
	struct node_cache_info *info, *next;

	if (!node->cache_dev)
		return;

	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
		list_del(&info->node);
		device_unregister(&info->dev);
	}
	device_unregister(node->cache_dev);
}

static void node_init_caches(unsigned int nid)
{
	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
}
#else
static void node_init_caches(unsigned int nid) { }
static void node_remove_caches(struct node *node) { }
#endif

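/* K(x) converts a count of pages into kilobytes. */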
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int len = 0;
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct sysinfo i;
	unsigned long sreclaimable, sunreclaimable;
	unsigned long swapcached = 0;

	si_meminfo_node(&i, nid);
	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
#ifdef CONFIG_SWAP
	swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
#endif
	len = sysfs_emit_at(buf, len,
			    "Node %d MemTotal:       %8lu kB\n"
			    "Node %d MemFree:        %8lu kB\n"
			    "Node %d MemUsed:        %8lu kB\n"
			    "Node %d SwapCached:     %8lu kB\n"
			    "Node %d Active:         %8lu kB\n"
			    "Node %d Inactive:       %8lu kB\n"
			    "Node %d Active(anon):   %8lu kB\n"
			    "Node %d Inactive(anon): %8lu kB\n"
			    "Node %d Active(file):   %8lu kB\n"
			    "Node %d Inactive(file): %8lu kB\n"
			    "Node %d Unevictable:    %8lu kB\n"
			    "Node %d Mlocked:        %8lu kB\n",
			    nid, K(i.totalram),
			    nid, K(i.freeram),
			    nid, K(i.totalram - i.freeram),
			    nid, K(swapcached),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
				   node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
				   node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	len += sysfs_emit_at(buf, len,
			     "Node %d HighTotal:      %8lu kB\n"
			     "Node %d HighFree:       %8lu kB\n"
			     "Node %d LowTotal:       %8lu kB\n"
			     "Node %d LowFree:        %8lu kB\n",
			     nid, K(i.totalhigh),
			     nid, K(i.freehigh),
			     nid, K(i.totalram - i.totalhigh),
			     nid, K(i.freeram - i.freehigh));
#endif
	len += sysfs_emit_at(buf, len,
			     "Node %d Dirty:          %8lu kB\n"
			     "Node %d Writeback:      %8lu kB\n"
			     "Node %d FilePages:      %8lu kB\n"
			     "Node %d Mapped:         %8lu kB\n"
			     "Node %d AnonPages:      %8lu kB\n"
			     "Node %d Shmem:          %8lu kB\n"
			     "Node %d KernelStack:    %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
			     "Node %d ShadowCallStack:%8lu kB\n"
#endif
			     "Node %d PageTables:     %8lu kB\n"
			     "Node %d NFS_Unstable:   %8lu kB\n"
			     "Node %d Bounce:         %8lu kB\n"
			     "Node %d WritebackTmp:   %8lu kB\n"
			     "Node %d KReclaimable:   %8lu kB\n"
			     "Node %d Slab:           %8lu kB\n"
			     "Node %d SReclaimable:   %8lu kB\n"
			     "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     "Node %d AnonHugePages:  %8lu kB\n"
			     "Node %d ShmemHugePages: %8lu kB\n"
			     "Node %d ShmemPmdMapped: %8lu kB\n"
			     "Node %d FileHugePages:  %8lu kB\n"
			     "Node %d FilePmdMapped:  %8lu kB\n"
#endif
			     ,
			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
			     nid, K(i.sharedram),
			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
			     nid, 0UL,
			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			     nid, K(sreclaimable +
				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
			     nid, K(sreclaimable + sunreclaimable),
			     nid, K(sreclaimable),
			     nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			     ,
			     nid, K(node_page_state(pgdat, NR_ANON_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			     nid, K(node_page_state(pgdat, NR_FILE_THPS)),
			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
#endif
			     );
	len += hugetlb_report_node_meminfo(buf, len, nid);
	return len;
}

#undef K
static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);

static ssize_t node_read_numastat(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	fold_vm_numa_events();
	return sysfs_emit(buf,
			  "numa_hit %lu\n"
			  "numa_miss %lu\n"
			  "numa_foreign %lu\n"
			  "interleave_hit %lu\n"
			  "local_node %lu\n"
			  "other_node %lu\n",
			  sum_zone_numa_event_state(dev->id, NUMA_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_MISS),
			  sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
			  sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
			  sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
			  sum_zone_numa_event_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);

static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int i;
	int len = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     zone_stat_name(i),
				     sum_zone_node_page_state(nid, i));

#ifdef CONFIG_NUMA
	fold_vm_numa_events();
	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
		len += sysfs_emit_at(buf, len, "%s %lu\n",
				     numa_stat_name(i),
				     sum_zone_numa_event_state(nid, i));

#endif
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		unsigned long pages = node_page_state_pages(pgdat, i);

		if (vmstat_item_print_in_thp(i))
			pages /= HPAGE_PMD_NR;
		len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
				     pages);
	}

	return len;
}
static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);

static ssize_t node_read_distance(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i) {
		len += sysfs_emit_at(buf, len, "%s%d",
				     i ? " " : "", node_distance(nid, i));
	}

	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);

static struct attribute *node_dev_attrs[] = {
	&dev_attr_meminfo.attr,
	&dev_attr_numastat.attr,
	&dev_attr_distance.attr,
	&dev_attr_vmstat.attr,
	NULL
};

static struct bin_attribute *node_dev_bin_attrs[] = {
	&bin_attr_cpumap,
	&bin_attr_cpulist,
	NULL
};

static const struct attribute_group node_dev_group = {
	.attrs = node_dev_attrs,
	.bin_attrs = node_dev_bin_attrs
};

static const struct attribute_group *node_dev_groups[] = {
	&node_dev_group,
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
	&arch_node_dev_group,
#endif
	NULL
};

#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs per node attributes registration interface:
 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 * it will register its per node attributes for all online nodes with
 * memory.  It will also call register_hugetlbfs_with_node(), below, to
 * register its attribute registration functions with this node driver.
 * Once these hooks have been initialized, the node driver will call into
 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 */
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;

static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
	    node_state(node->dev.id, N_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}

static inline void hugetlb_unregister_node(struct node *node)
{
	if (__hugetlb_unregister_node)
		__hugetlb_unregister_node(node);
}

void register_hugetlbfs_with_node(node_registration_func_t doregister,
				  node_registration_func_t unregister)
{
	__hugetlb_register_node = doregister;
	__hugetlb_unregister_node = unregister;
}
#else
static inline void hugetlb_register_node(struct node *node) {}

static inline void hugetlb_unregister_node(struct node *node) {}
#endif

static void node_device_release(struct device *dev)
{
	struct node *node = to_node(dev);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
	/*
	 * The work is only scheduled when a memory section is
	 * onlined/offlined on this node.  By the time we get here,
	 * all the memory on this node has been offlined, so no new
	 * work will be enqueued.
	 *
	 * The work uses node->node_work, so flush it before freeing
	 * the memory.
	 */
	flush_work(&node->node_work);
#endif
	kfree(node);
}

/*
 * register_node - Setup a sysfs device for a node.
 * @num - Node number to use when creating the device.
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	node->dev.groups = node_dev_groups;
	error = device_register(&node->dev);

	if (error)
		put_device(&node->dev);
	else {
		hugetlb_register_node(node);

		compaction_register_node(node);
	}
	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
	node_remove_accesses(node);
	node_remove_caches(node);
	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];

/*
 * register cpu under node
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}

/**
 * register_memory_node_under_compute_node - link memory node to its compute
 *					      node for a given access class.
 * @mem_nid:	Memory node number
 * @cpu_nid:	Cpu node number
 * @access:	Access class to register
 *
 * Description:
 *	For use with platforms that may have separate memory and compute nodes.
 *	This function will export node relationships linking which memory
 *	initiator nodes can access memory targets at a given ranked access
 *	class.
 */
int register_memory_node_under_compute_node(unsigned int mem_nid,
					    unsigned int cpu_nid,
					    unsigned int access)
{
	struct node *init_node, *targ_node;
	struct node_access_nodes *initiator, *target;
	int ret;

	if (!node_online(cpu_nid) || !node_online(mem_nid))
		return -ENODEV;

	init_node = node_devices[cpu_nid];
	targ_node = node_devices[mem_nid];
	initiator = node_init_node_access(init_node, access);
	target = node_init_node_access(targ_node, access);
	if (!initiator || !target)
		return -ENOMEM;

	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
				      &targ_node->dev.kobj,
				      dev_name(&targ_node->dev));
	if (ret)
		return ret;

	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
				      &init_node->dev.kobj,
				      dev_name(&init_node->dev));
	if (ret)
		goto err;

	return 0;
err:
	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
				     dev_name(&targ_node->dev));
	return ret;
}

int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int __ref get_nid_for_pfn(unsigned long pfn)
{
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	if (system_state < SYSTEM_RUNNING)
		return early_pfn_to_nid(pfn);
#endif
	return pfn_to_nid(pfn);
}

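/*
 * do_register_memory_block_under_node() creates a pair of symlinks: one
 * from the node device to the memory block device and one back, so
 * userspace can map memory blocks to NUMA nodes and vice versa.
 */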
static void do_register_memory_block_under_node(int nid,
						struct memory_block *mem_blk)
{
	int ret;

	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node.
	 */
	mem_blk->nid = nid;

	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
				       &mem_blk->dev.kobj,
				       kobject_name(&mem_blk->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&node_devices[nid]->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&mem_blk->dev.kobj), ret);

	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				       &node_devices[nid]->dev.kobj,
				       kobject_name(&node_devices[nid]->dev.kobj));
	if (ret && ret != -EEXIST)
		dev_err_ratelimited(&mem_blk->dev,
				    "can't create link to %s in sysfs (%d)\n",
				    kobject_name(&node_devices[nid]->dev.kobj),
				    ret);
}

/* register memory section under specified node if it spans that node */
static int register_mem_block_under_node_early(struct memory_block *mem_blk,
					       void *arg)
{
	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
	int nid = *(int *)arg;
	unsigned long pfn;

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		int page_nid;

		/*
		 * The memory block could have several absent sections from
		 * the start; skip the pfn range of any absent section.
		 */
		if (!pfn_in_present_section(pfn)) {
			pfn = round_down(pfn + PAGES_PER_SECTION,
					 PAGES_PER_SECTION) - 1;
			continue;
		}

		/*
		 * We need to check which nid a page belongs to only in the
		 * boot case, because node ranges can be interleaved.
		 */
		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;

		do_register_memory_block_under_node(nid, mem_blk);
		return 0;
	}
	/* mem section does not span the specified node */
	return 0;
}


/*
 * During hotplug we know that all pages in the memory block belong to the same
 * node.
 */
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
						 void *arg)
{
	int nid = *(int *)arg;

	do_register_memory_block_under_node(nid, mem_blk);
	return 0;
}

/*
 * Unregister a memory block device under the node it spans. Memory blocks
 * with multiple nodes cannot be offlined and therefore also never be removed.
 */
void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
	if (mem_blk->nid == NUMA_NO_NODE)
		return;

	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
			  kobject_name(&mem_blk->dev.kobj));
	sysfs_remove_link(&mem_blk->dev.kobj,
			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}

void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
		       enum meminit_context context)
{
	walk_memory_blocks_func_t func;

	if (context == MEMINIT_HOTPLUG)
		func = register_mem_block_under_node_hotplug;
	else
		func = register_mem_block_under_node_early;

	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
			   (void *)&nid, func);
	return;
}

#ifdef CONFIG_HUGETLBFS
/*
 * Handle per node hstate attribute [un]registration on transitions
 * to/from memoryless state.
 */
static void node_hugetlb_work(struct work_struct *work)
{
	struct node *node = container_of(work, struct node, node_work);

	/*
	 * We only get here when a node transitions to/from memoryless state.
	 * We can detect which transition occurred by examining whether the
	 * node has memory now.  hugetlb_register_node() already checks this,
	 * so we try to register the attributes.  If that fails, then the
	 * node has transitioned to memoryless; try to unregister the
	 * attributes.
	 */
	if (!hugetlb_register_node(node))
		hugetlb_unregister_node(node);
}

static void init_node_hugetlb_work(int nid)
{
	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
}

static int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;

	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/*
		 * offload per node hstate [un]registration to a work thread
		 * when transitioning to/from memoryless state.
		 */
		if (nid != NUMA_NO_NODE)
			schedule_work(&node_devices[nid]->node_work);
		break;

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}
#endif	/* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG */

#if !defined(CONFIG_MEMORY_HOTPLUG) || !defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	return NOTIFY_OK;
}

static void init_node_hugetlb_work(int nid) { }

#endif

int __register_one_node(int nid)
{
	int error;
	int cpu;

	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!node_devices[nid])
		return -ENOMEM;

	error = register_node(node_devices[nid], nid);

	/* link cpu under this node */
	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == nid)
			register_cpu_under_node(cpu, nid);
	}

	INIT_LIST_HEAD(&node_devices[nid]->access_list);
	/* initialize work queue for memory hot plug */
	init_node_hugetlb_work(nid);
	node_init_caches(nid);

	return error;
}

void unregister_one_node(int nid)
{
	if (!node_devices[nid])
		return;

	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}

/*
 * node states attributes
 */

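/*
 * These attributes are registered on the subsystem root, so they show up
 * directly as /sys/devices/system/node/{possible,online,has_normal_memory,...},
 * each printing the corresponding nodemask as a ranged list.
 */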
struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);

	return sysfs_emit(buf, "%*pbl\n",
			  nodemask_pr_args(&node_states[na->state]));
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
					   N_GENERIC_INITIATOR),
};

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
	&node_state_attr[N_MEMORY].attr.attr,
	&node_state_attr[N_CPU].attr.attr,
	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
static int __init register_node_type(void)
{
	int ret;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (!ret) {
		static struct notifier_block node_memory_callback_nb = {
			.notifier_call = node_memory_callback,
			.priority = NODE_CALLBACK_PRI,
		};
		register_hotmemory_notifier(&node_memory_callback_nb);
	}

	/*
	 * Note: we're not going to unregister the node class if we fail
	 * to register the node state class attribute files.
	 */
	return ret;
}
postcore_initcall(register_node_type);