/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/node.h - generic node definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct node' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/node.c.
 *
 * Nodes are exported via sysfs in the /sys/devices/system/node/
 * directory.
 */
#ifndef _LINUX_NODE_H_
#define _LINUX_NODE_H_

#include <linux/device.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/workqueue.h>

/**
 * struct node_hmem_attrs - heterogeneous memory performance attributes
 *
 * @read_bandwidth:	Read bandwidth in MB/s
 * @write_bandwidth:	Write bandwidth in MB/s
 * @read_latency:	Read latency in nanoseconds
 * @write_latency:	Write latency in nanoseconds
 */
struct node_hmem_attrs {
	unsigned int read_bandwidth;
	unsigned int write_bandwidth;
	unsigned int read_latency;
	unsigned int write_latency;
};

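/*
 * Example (illustrative sketch only; the numbers are made up): a
 * platform driver that has parsed performance data for node 'nid'
 * might report it for access class 0 like this:
 *
 *	struct node_hmem_attrs attrs = {
 *		.read_bandwidth		= 19200,
 *		.write_bandwidth	= 19200,
 *		.read_latency		= 150,
 *		.write_latency		= 200,
 *	};
 *	node_set_perf_attrs(nid, &attrs, 0);
 */
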
enum cache_indexing {
	NODE_CACHE_DIRECT_MAP,
	NODE_CACHE_INDEXED,
	NODE_CACHE_OTHER,
};

enum cache_write_policy {
	NODE_CACHE_WRITE_BACK,
	NODE_CACHE_WRITE_THROUGH,
	NODE_CACHE_WRITE_OTHER,
};

/**
 * struct node_cache_attrs - system memory caching attributes
 *
 * @indexing:		The ways memory blocks may be placed in cache
 * @write_policy:	Write back or write through policy
 * @size:		Total size of cache in bytes
 * @line_size:		Number of bytes fetched on a cache miss
 * @level:		The cache hierarchy level
 */
struct node_cache_attrs {
	enum cache_indexing indexing;
	enum cache_write_policy write_policy;
	u64 size;
	u16 line_size;
	u8 level;
};

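/*
 * Example (hypothetical values): describing a 1 GiB direct-mapped,
 * write-back memory-side cache with 64-byte lines at level 1, then
 * registering it against node 'nid':
 *
 *	struct node_cache_attrs cache = {
 *		.indexing	= NODE_CACHE_DIRECT_MAP,
 *		.write_policy	= NODE_CACHE_WRITE_BACK,
 *		.size		= 1ULL << 30,
 *		.line_size	= 64,
 *		.level		= 1,
 *	};
 *	node_add_cache(nid, &cache);
 */
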
#ifdef CONFIG_HMEM_REPORTING
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
			 unsigned int access);
#else
static inline void node_add_cache(unsigned int nid,
				  struct node_cache_attrs *cache_attrs)
{
}

static inline void node_set_perf_attrs(unsigned int nid,
				       struct node_hmem_attrs *hmem_attrs,
				       unsigned int access)
{
}
#endif

struct node {
	struct device dev;
	struct list_head access_list;

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
	struct work_struct node_work;
#endif
#ifdef CONFIG_HMEM_REPORTING
	struct list_head cache_attrs;
	struct device *cache_dev;
#endif
};

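/*
 * Note: once registered, a node's embedded struct device appears as
 * /sys/devices/system/node/node<nid>, and the per-node attributes
 * (meminfo, cpumap, distance, ...) hang off that device.
 */
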
struct memory_block;
extern struct node *node_devices[];
typedef void (*node_registration_func_t)(struct node *);

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
extern int link_mem_sections(int nid, unsigned long start_pfn,
			     unsigned long end_pfn);
#else
static inline int link_mem_sections(int nid, unsigned long start_pfn,
				    unsigned long end_pfn)
{
	return 0;
}
#endif

extern void unregister_node(struct node *node);
#ifdef CONFIG_NUMA
/* Core of the node registration - only memory hotplug should use this */
extern int __register_one_node(int nid);

/* Registers an online node */
static inline int register_one_node(int nid)
{
	int error = 0;

	if (node_online(nid)) {
		struct pglist_data *pgdat = NODE_DATA(nid);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;

		error = __register_one_node(nid);
		if (error)
			return error;
		/* link memory sections under this node */
		error = link_mem_sections(nid, start_pfn, end_pfn);
	}

	return error;
}

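/*
 * Example (sketch): boot-time topology setup typically walks the
 * online nodes and registers each one, e.g.:
 *
 *	for_each_online_node(nid)
 *		register_one_node(nid);
 */
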
extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);

extern int register_memory_node_under_compute_node(unsigned int mem_nid,
						   unsigned int cpu_nid,
						   unsigned int access);

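/*
 * Example (sketch): a firmware table parser (e.g. the ACPI HMAT code)
 * that finds CPU node 0 to be the best-performing initiator for memory
 * node 1 under access class 0 would record that relationship with:
 *
 *	register_memory_node_under_compute_node(1, 0, 0);
 *
 * which links the two node devices under their respective access0/
 * sysfs directories.
 */
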
#ifdef CONFIG_HUGETLBFS
extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
					 node_registration_func_t unregister);
#endif
#else
static inline int __register_one_node(int nid)
{
	return 0;
}
static inline int register_one_node(int nid)
{
	return 0;
}
static inline int unregister_one_node(int nid)
{
	return 0;
}
static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	return 0;
}
static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	return 0;
}
static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
}

static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
						node_registration_func_t unreg)
{
}
#endif

#define to_node(device) container_of(device, struct node, dev)
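
/*
 * Example (sketch; foo_show() is a hypothetical attribute callback): a
 * device_attribute registered on a node device can recover the owning
 * struct node from its embedded struct device:
 *
 *	static ssize_t foo_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		struct node *n = to_node(dev);
 *		...
 *	}
 */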

#endif /* _LINUX_NODE_H_ */