// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */
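/*
 * Illustrative sysfs layout provided by this file (paths shown for the
 * default MEMORY_CLASS_NAME "memory"; the per-block id N is explained at
 * base_memory_block_id() below):
 *
 *   /sys/devices/system/memory/block_size_bytes
 *   /sys/devices/system/memory/auto_online_blocks
 *   /sys/devices/system/memory/memoryN/state
 *   /sys/devices/system/memory/memoryN/phys_index
 *   /sys/devices/system/memory/memoryN/phys_device
 *   /sys/devices/system/memory/memoryN/removable
 */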

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(mem_sysfs_mutex);

#define MEMORY_CLASS_NAME	"memory"

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline int base_memory_block_id(int section_nr)
{
	return section_nr / sections_per_block;
}
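/*
 * Example of the arithmetic above (values illustrative): with
 * sections_per_block == 8, section 17 belongs to memory block 2,
 * which spans sections 16..23.
 */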

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

static unsigned long get_memory_block_size(void)
{
	unsigned long block_sz;

	block_sz = memory_block_size_bytes();

	/* Validate block_sz is a power of 2 and not less than the section size */
	if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
		WARN_ON(1);
		block_sz = MIN_MEMORY_BLOCK_SIZE;
	}

	return block_sz;
}
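/*
 * Example (illustrative, arch dependent): if an architecture overrides
 * memory_block_size_bytes() to return 2 GiB while MIN_MEMORY_BLOCK_SIZE
 * (the SPARSEMEM section size) is 128 MiB, memory_dev_init() below ends
 * up with sections_per_block = 2 GiB / 128 MiB = 16.
 */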

/*
 * Show the physical block index of this memory block, i.e. its first
 * section number divided by sections_per_block.
 */

static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}
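/*
 * Example (illustrative): reading
 * /sys/devices/system/memory/memory32/phys_index returns "00000020",
 * i.e. the block id as a zero-padded hexadecimal value.
 */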

/*
 * Show whether the section of memory is likely to be hot-removable
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	unsigned long i, pfn;
	int ret = 1;
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state != MEM_ONLINE)
		goto out;

	for (i = 0; i < sections_per_block; i++) {
		if (!present_section_nr(mem->start_section_nr + i))
			continue;
		pfn = section_nr_to_pfn(mem->start_section_nr + i);
		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
	}

out:
	return sprintf(buf, "%d\n", ret);
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
				mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}

/*
 * The probe routines leave the pages uninitialized, just as the bootmem code
 * does. Make sure we do not access them, but instead use only information from
 * within sections.
 */
static bool pages_correctly_probed(unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	unsigned long section_nr_end = section_nr + sections_per_block;
	unsigned long pfn = start_pfn;

	/*
	 * The memmap between sections is not contiguous except with
	 * SPARSEMEM_VMEMMAP. We look up the page once per section
	 * and assume the memmap is contiguous within each section.
	 */
	for (; section_nr < section_nr_end; section_nr++) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;

		if (!present_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) not present\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (!valid_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (online_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) is already online\n",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		}
		pfn += PAGES_PER_SECTION;
	}

	return true;
}
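/*
 * Note that only the first pfn of each section is checked above; e.g.
 * with 128 MiB sections of 4 KiB pages (PAGES_PER_SECTION == 32768) the
 * loop advances in 32768-page steps rather than probing every page.
 */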

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	int ret;

	start_pfn = section_nr_to_pfn(phys_index);

	switch (action) {
	case MEM_ONLINE:
		if (!pages_correctly_probed(start_pfn))
			return -EBUSY;

		ret = online_pages(start_pfn, nr_pages, online_type);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, phys_index, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				mem->online_type);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * If we are called from state_store(), online_type will already be
	 * set >= 0. Otherwise we were called from the device online
	 * attribute and need to set the online_type.
	 */
	if (mem->online_type < 0)
		mem->online_type = MMOP_ONLINE_KEEP;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

	/* clear online_type */
	mem->online_type = -1;

	return ret;
}
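/*
 * Sketch of the two paths that reach memory_subsys_online() (assuming the
 * generic device core behaviour): a write of "online*" to the block's
 * "state" attribute, where state_store() has already set mem->online_type,
 * or a write of "1" to the generic "online" attribute, where online_type
 * is still -1 and defaults to MMOP_ONLINE_KEEP above.
 */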

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	/* Can't offline block with non-present sections */
	if (mem->section_count != sections_per_block)
		return -EINVAL;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret, online_type;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (sysfs_streq(buf, "online_kernel"))
		online_type = MMOP_ONLINE_KERNEL;
	else if (sysfs_streq(buf, "online_movable"))
		online_type = MMOP_ONLINE_MOVABLE;
	else if (sysfs_streq(buf, "online"))
		online_type = MMOP_ONLINE_KEEP;
	else if (sysfs_streq(buf, "offline"))
		online_type = MMOP_OFFLINE;
	else {
		ret = -EINVAL;
		goto err;
	}

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE_KEEP:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

err:
	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
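/*
 * Example usage from userspace (illustrative):
 *
 *   echo offline        > /sys/devices/system/memory/memory32/state
 *   echo online_movable > /sys/devices/system/memory/memory32/state
 *
 * Any string other than the four accepted above yields -EINVAL.
 */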

/*
 * phys_device is a bad name for this.  What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or fru.
 * i.e. do these ranges belong to the same physical device,
 * s.t. if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	return sprintf(buf, "%d\n", mem->phys_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
		unsigned long nr_pages, int online_type,
		struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone != default_zone) {
		strcat(buf, " ");
		strcat(buf, zone->name);
	}
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long valid_start_pfn, valid_end_pfn;
	struct zone *default_zone;
	int nid;

	/*
	 * Check the existing zone. Make sure that we do that only on
	 * online nodes, otherwise page_zone() is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block that contains more than one zone can not be
		 * offlined. This can happen e.g. for ZONE_DMA and ZONE_DMA32.
		 */
		if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
					  &valid_start_pfn, &valid_end_pfn))
			return sprintf(buf, "none\n");
		start_pfn = valid_start_pfn;
		strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
		goto out;
	}

	nid = mem->nid;
	default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
	strcat(buf, default_zone->name);

	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
			default_zone);
	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
			default_zone);
out:
	strcat(buf, "\n");

	return strlen(buf);
}
static DEVICE_ATTR_RO(valid_zones);
#endif

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Block size attribute stuff
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", get_memory_block_size());
}

static DEVICE_ATTR_RO(block_size_bytes);

/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	if (memhp_auto_online)
		return sprintf(buf, "online\n");
	else
		return sprintf(buf, "offline\n");
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	if (sysfs_streq(buf, "online"))
		memhp_auto_online = true;
	else if (sysfs_streq(buf, "offline"))
		memhp_auto_online = false;
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);
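/*
 * Example (illustrative):
 *   echo online > /sys/devices/system/memory/auto_online_blocks
 * lets newly hot-added memory blocks come up online without a separate
 * per-block "state" write; "echo offline" restores the manual policy.
 */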

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace.  The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}
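/*
 * Example (illustrative, CONFIG_ARCH_MEMORY_PROBE only): writing a
 * block-aligned physical address, e.g.
 *   echo 0x100000000 > /sys/devices/system/memory/probe
 * hot-adds one memory block starting at that address.
 */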

static DEVICE_ATTR_WO(probe);
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}
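/*
 * Example (illustrative): the value written is a physical address, not a
 * pfn, so
 *   echo 0x200000 > /sys/devices/system/memory/soft_offline_page
 * soft-offlines the page containing physical address 0x200000.
 */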

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif

/*
 * Note that phys_device is optional.  It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned object is held and the reference for the
 * hinted object is released.
 */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
					      struct memory_block *hint)
{
	int block_id = base_memory_block_id(__section_nr(section));
	struct device *hintdev = hint ? &hint->dev : NULL;
	struct device *dev;

	dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
	if (hint)
		put_device(&hint->dev);
	if (!dev)
		return NULL;
	return to_memory_block(dev);
}

/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	return find_memory_block_hinted(section, NULL);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

/*
 * register_memory - Set up a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret)
		put_device(&memory->dev);

	return ret;
}

static int init_memory_block(struct memory_block **memory,
			     struct mem_section *section, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int scn_nr;
	int ret = 0;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	scn_nr = __section_nr(section);
	mem->start_section_nr =
			base_memory_block_id(scn_nr) * sections_per_block;
	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);

	ret = register_memory(mem);

	*memory = mem;
	return ret;
}

static int add_memory_block(int base_section_nr)
{
	struct memory_block *mem;
	int i, ret, section_count = 0, section_nr;

	for (i = base_section_nr;
	     i < base_section_nr + sections_per_block;
	     i++) {
		if (!present_section_nr(i))
			continue;
		if (section_count == 0)
			section_nr = i;
		section_count++;
	}

	if (section_count == 0)
		return 0;
	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
	if (ret)
		return ret;
	mem->section_count = section_count;
	return 0;
}

/*
 * We need an interface for the VM to add new memory regions,
 * but without onlining them.
 */
int hotplug_memory_register(int nid, struct mem_section *section)
{
	int ret = 0;
	struct memory_block *mem;

	mutex_lock(&mem_sysfs_mutex);

	mem = find_memory_block(section);
	if (mem) {
		mem->section_count++;
		put_device(&mem->dev);
	} else {
		ret = init_memory_block(&mem, section, MEM_OFFLINE);
		if (ret)
			goto out;
		mem->section_count++;
	}

out:
	mutex_unlock(&mem_sysfs_mutex);
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void
unregister_memory(struct memory_block *memory)
{
	BUG_ON(memory->dev.bus != &memory_subsys);

	/* drop the ref. we got in remove_memory_section() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

static int remove_memory_section(unsigned long node_id,
			       struct mem_section *section, int phys_device)
{
	struct memory_block *mem;

	mutex_lock(&mem_sysfs_mutex);

	/*
	 * Some users of the memory hotplug do not want/need memblock to
	 * track all sections. Skip over those.
	 */
	mem = find_memory_block(section);
	if (!mem)
		goto out_unlock;

	unregister_mem_sect_under_nodes(mem, __section_nr(section));

	mem->section_count--;
	if (mem->section_count == 0)
		unregister_memory(mem);
	else
		put_device(&mem->dev);

out_unlock:
	mutex_unlock(&mem_sysfs_mutex);
	return 0;
}

int unregister_memory_section(struct mem_section *section)
{
	if (!present_section(section))
		return -EINVAL;

	return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices...
 */
int __init memory_dev_init(void)
{
	unsigned int i;
	int ret;
	int err;
	unsigned long block_sz;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		goto out;

	block_sz = get_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	mutex_lock(&mem_sysfs_mutex);
	for (i = 0; i <= __highest_present_section_nr;
		i += sections_per_block) {
		err = add_memory_block(i);
		if (!ret)
			ret = err;
	}
	mutex_unlock(&mem_sysfs_mutex);

out:
	if (ret)
		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
	return ret;
}