// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Partition Mobility/Migration
 *
 * Copyright (C) 2010 Nathan Fontenot
 * Copyright (C) 2010 IBM Corporation
 */

#define pr_fmt(fmt) "mobility: " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/stop_machine.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/stringify.h>

#include <asm/machdep.h>
#include <asm/rtas.h>
#include "pseries.h"
#include "../../kernel/cacheinfo.h"

static struct kobject *mobility_kobj;

struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;
} __packed;

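/*
 * Each word in the node list returned by ibm,update-nodes encodes the
 * requested operation in its high byte and the number of affected
 * phandles in the low 24 bits.
 */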
#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff

#define DELETE_DT_NODE	0x01000000
#define UPDATE_DT_NODE	0x02000000
#define ADD_DT_NODE	0x03000000

#define MIGRATION_SCOPE	(1)
#define PRRN_SCOPE -2

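/*
 * Both RTAS calls used here consume their arguments in rtas_data_buf,
 * which is shared with other RTAS users; copy the caller's work area
 * in and out while holding rtas_data_buf_lock.
 */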
static int mobility_rtas_call(int token, char *buf, s32 scope)
{
	int rc;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return rc;
}

static int delete_dt_node(struct device_node *dn)
{
	struct device_node *pdn;
	bool is_platfac;

	pdn = of_get_parent(dn);
	is_platfac = of_node_is_type(dn, "ibm,platform-facilities") ||
		     of_node_is_type(pdn, "ibm,platform-facilities");
	of_node_put(pdn);

	/*
	 * The drivers that bind to nodes in the platform-facilities
	 * hierarchy don't support node removal, and the removal directive
	 * from firmware is always followed by an add of an equivalent
	 * node. The capability (e.g. RNG, encryption, compression)
	 * represented by the node is never interrupted by the migration.
	 * So ignore changes to this part of the tree.
	 */
	if (is_platfac) {
		pr_notice("ignoring remove operation for %pOFfp\n", dn);
		return 0;
	}

	pr_debug("removing node %pOFfp\n", dn);
	dlpar_detach_node(dn);
	return 0;
}

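/*
 * Apply one property value descriptor from ibm,update-properties to
 * the node @dn. Large values arrive in chunks; *prop carries the
 * partially assembled property across calls until the final chunk,
 * at which point it is committed with of_update_property().
 */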
static int update_dt_property(struct device_node *dn, struct property **prop,
			      const char *name, u32 vd, char *value)
{
	struct property *new_prop = *prop;
	int more = 0;

	/*
	 * A negative 'vd' value indicates that only part of the new property
	 * value is contained in the buffer and we need to call
	 * ibm,update-properties again to get the rest of the value.
	 *
	 * A negative value is also the two's complement of the actual value.
	 */
	if (vd & 0x80000000) {
		vd = ~vd + 1;
		more = 1;
	}

	if (new_prop) {
		/* partial property fixup */
		char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL);
		if (!new_data)
			return -ENOMEM;

		memcpy(new_data, new_prop->value, new_prop->length);
		memcpy(new_data + new_prop->length, value, vd);

		kfree(new_prop->value);
		new_prop->value = new_data;
		new_prop->length += vd;
	} else {
		new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
		if (!new_prop)
			return -ENOMEM;

		new_prop->name = kstrdup(name, GFP_KERNEL);
		if (!new_prop->name) {
			kfree(new_prop);
			return -ENOMEM;
		}

		new_prop->length = vd;
		new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
		if (!new_prop->value) {
			kfree(new_prop->name);
			kfree(new_prop);
			return -ENOMEM;
		}

		memcpy(new_prop->value, value, vd);
		*prop = new_prop;
	}

	if (!more) {
		pr_debug("updating node %pOF property %s\n", dn, name);
		of_update_property(dn, new_prop);
		*prop = NULL;
	}

	return 0;
}

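/*
 * Retrieve all property updates for one node, calling
 * ibm,update-properties repeatedly while it returns 1 to indicate
 * that more data remains.
 */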
static int update_dt_node(struct device_node *dn, s32 scope)
{
	struct update_props_workarea *upwa;
	struct property *prop = NULL;
	int i, rc, rtas_rc;
	char *prop_data;
	char *rtas_buf;
	int update_properties_token;
	u32 nprops;
	u32 vd;

	update_properties_token = rtas_token("ibm,update-properties");
	if (update_properties_token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	upwa = (struct update_props_workarea *)&rtas_buf[0];
	upwa->phandle = cpu_to_be32(dn->phandle);

	do {
		rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
					     scope);
		if (rtas_rc < 0)
			break;

		prop_data = rtas_buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);

		/*
		 * On the first call to ibm,update-properties for a node the
		 * first property value descriptor contains an empty property
		 * name, the property value length encoded as u32, and the
		 * property value is the node path being updated.
		 */
		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			switch (vd) {
			case 0x00000000:
				/* name only property, nothing to do */
				break;

			case 0x80000000:
				of_remove_property(dn, of_find_property(dn,
							prop_name, NULL));
				prop = NULL;
				break;

			default:
				rc = update_dt_property(dn, &prop, prop_name,
							vd, prop_data);
				if (rc) {
					pr_err("updating %s property failed: %d\n",
					       prop_name, rc);
				}

				prop_data += vd;
				break;
			}

			cond_resched();
		}

		cond_resched();
	} while (rtas_rc == 1);

	kfree(rtas_buf);
	return 0;
}

static int add_dt_node(struct device_node *parent_dn, __be32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = dlpar_configure_connector(drc_index, parent_dn);
	if (!dn)
		return -ENOENT;

	/*
	 * Since delete_dt_node() ignores this node type, this is the
	 * necessary counterpart. We also know that a platform-facilities
	 * node returned from dlpar_configure_connector() has children
	 * attached, and dlpar_attach_node() only adds the parent, leaking
	 * the children. So ignore these on the add side for now.
	 */
	if (of_node_is_type(dn, "ibm,platform-facilities")) {
		pr_notice("ignoring add operation for %pOF\n", dn);
		dlpar_free_cc_nodes(dn);
		return 0;
	}

	rc = dlpar_attach_node(dn, parent_dn);
	if (rc)
		dlpar_free_cc_nodes(dn);

	pr_debug("added node %pOFfp\n", dn);

	return rc;
}

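/*
 * Walk the node list returned by ibm,update-nodes and delete, update
 * or add device tree nodes as instructed. The first four words of the
 * returned work area are a header and are skipped below.
 */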
int pseries_devicetree_update(s32 scope)
{
	char *rtas_buf;
	__be32 *data;
	int update_nodes_token;
	int rc;

	update_nodes_token = rtas_token("ibm,update-nodes");
	if (update_nodes_token == RTAS_UNKNOWN_SERVICE)
		return 0;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	do {
		rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
		if (rc && rc != 1)
			break;

		data = (__be32 *)rtas_buf + 4;
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			int i;
			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;

			data++;

			for (i = 0; i < node_count; i++) {
				struct device_node *np;
				__be32 phandle = *data++;
				__be32 drc_index;

				np = of_find_node_by_phandle(be32_to_cpu(phandle));
				if (!np) {
					pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n",
						be32_to_cpu(phandle), action);
					continue;
				}

				switch (action) {
				case DELETE_DT_NODE:
					delete_dt_node(np);
					break;
				case UPDATE_DT_NODE:
					update_dt_node(np, scope);
					break;
				case ADD_DT_NODE:
					drc_index = *data++;
					add_dt_node(np, drc_index);
					break;
				}

				of_node_put(np);
				cond_resched();
			}
		}

		cond_resched();
	} while (rc == 1);

	kfree(rtas_buf);
	return rc;
}

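/*
 * Called after a successful partition suspend/resume: activate the
 * destination system's firmware level and fold its device tree
 * changes into the running kernel.
 */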
void post_mobility_fixup(void)
{
	int rc;

	rtas_activate_firmware();

	/*
	 * We don't want CPUs to go online/offline while the device
	 * tree is being updated.
	 */
	cpus_read_lock();

	/*
	 * It's common for the destination firmware to replace cache
	 * nodes. Release all of the cacheinfo hierarchy's references
	 * before updating the device tree.
	 */
	cacheinfo_teardown();

	rc = pseries_devicetree_update(MIGRATION_SCOPE);
	if (rc)
		pr_err("device tree update failed: %d\n", rc);

	cacheinfo_rebuild();

	cpus_read_unlock();

	/* Possibly switch to a new L1 flush type */
	pseries_setup_security_mitigations();

	/* Reinitialise system information for hv-24x7 */
	read_24x7_sys_info();
}

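/*
 * Query the state of the migration session identified by @handle,
 * translating the H_VASI_STATE hcall status into an errno value.
 */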
static int poll_vasi_state(u64 handle, unsigned long *res)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long hvrc;
	int ret;

	hvrc = plpar_hcall(H_VASI_STATE, retbuf, handle);
	switch (hvrc) {
	case H_SUCCESS:
		ret = 0;
		*res = retbuf[0];
		break;
	case H_PARAMETER:
		ret = -EINVAL;
		break;
	case H_FUNCTION:
		ret = -EOPNOTSUPP;
		break;
	case H_HARDWARE:
	default:
		pr_err("unexpected H_VASI_STATE result %ld\n", hvrc);
		ret = -EIO;
		break;
	}
	return ret;
}

static int wait_for_vasi_session_suspending(u64 handle)
{
	unsigned long state;
	int ret;

	/*
	 * Wait for transition from H_VASI_ENABLED to
	 * H_VASI_SUSPENDING. Treat anything else as an error.
	 */
	while (true) {
		ret = poll_vasi_state(handle, &state);

		if (ret != 0 || state == H_VASI_SUSPENDING) {
			break;
		} else if (state == H_VASI_ENABLED) {
			ssleep(1);
		} else {
			pr_err("unexpected H_VASI_STATE result %lu\n", state);
			ret = -EIO;
			break;
		}
	}

	/*
	 * Proceed even if H_VASI_STATE is unavailable. If H_JOIN or
	 * ibm,suspend-me are also unimplemented, we'll recover then.
	 */
	if (ret == -EOPNOTSUPP)
		ret = 0;

	return ret;
}

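/* Wake one CPU that is blocked in H_JOIN by sending it an H_PROD. */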
static void prod_single(unsigned int target_cpu)
{
	long hvrc;
	int hwid;

	hwid = get_hard_smp_processor_id(target_cpu);
	hvrc = plpar_hcall_norets(H_PROD, hwid);
	if (hvrc == H_SUCCESS)
		return;
	pr_err_ratelimited("H_PROD of CPU %u (hwid %d) error: %ld\n",
			   target_cpu, hwid, hvrc);
}

static void prod_others(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())
			prod_single(cpu);
	}
}

static u16 clamp_slb_size(void)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
	u16 prev = mmu_slb_size;

	slb_set_size(SLB_MIN_SIZE);

	return prev;
#else
	return 0;
#endif
}

static int do_suspend(void)
{
	u16 saved_slb_size;
	int status;
	int ret;

	pr_info("calling ibm,suspend-me on CPU %i\n", smp_processor_id());

	/*
	 * The destination processor model may have fewer SLB entries
	 * than the source. We reduce mmu_slb_size to a safe minimum
	 * before suspending in order to minimize the possibility of
	 * programming non-existent entries on the destination. If
	 * suspend fails, we restore it before returning. On success
	 * the OF reconfig path will update it from the new device
	 * tree after resuming on the destination.
	 */
	saved_slb_size = clamp_slb_size();

	ret = rtas_ibm_suspend_me(&status);
	if (ret != 0) {
		pr_err("ibm,suspend-me error: %d\n", status);
		slb_set_size(saved_slb_size);
	}

	return ret;
}

/**
 * struct pseries_suspend_info - State shared between CPUs for join/suspend.
 * @counter: Threads are to increment this upon resuming from suspend
 *           or if an error is received from H_JOIN. The thread which performs
 *           the first increment (i.e. sets it to 1) is responsible for
 *           waking the other threads.
 * @done: False if join/suspend is in progress. True if the operation is
 *        complete (successful or not).
 */
struct pseries_suspend_info {
	atomic_t counter;
	bool done;
};

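/*
 * Run on every online CPU via stop_machine(). Exactly one caller
 * receives H_CONTINUE from H_JOIN and performs the suspend; the rest
 * sleep in the hypervisor until they are prodded after the resume.
 */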
static int do_join(void *arg)
{
	struct pseries_suspend_info *info = arg;
	atomic_t *counter = &info->counter;
	long hvrc;
	int ret;

retry:
	/* Must ensure MSR.EE off for H_JOIN. */
	hard_irq_disable();
	hvrc = plpar_hcall_norets(H_JOIN);

	switch (hvrc) {
	case H_CONTINUE:
		/*
		 * All other CPUs are offline or in H_JOIN. This CPU
		 * attempts the suspend.
		 */
		ret = do_suspend();
		break;
	case H_SUCCESS:
		/*
		 * The suspend is complete and this CPU has received a
		 * prod, or we've received a stray prod from unrelated
		 * code (e.g. paravirt spinlocks) and we need to join
		 * again.
		 *
		 * This barrier orders the return from H_JOIN above vs
		 * the load of info->done. It pairs with the barrier
		 * in the wakeup/prod path below.
		 */
		smp_mb();
		if (READ_ONCE(info->done) == false) {
			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying\n",
					    smp_processor_id());
			goto retry;
		}
		ret = 0;
		break;
	case H_BAD_MODE:
	case H_HARDWARE:
	default:
		ret = -EIO;
		pr_err_ratelimited("H_JOIN error %ld on CPU %i\n",
				   hvrc, smp_processor_id());
		break;
	}

	if (atomic_inc_return(counter) == 1) {
		pr_info("CPU %u waking all threads\n", smp_processor_id());
		WRITE_ONCE(info->done, true);
		/*
		 * This barrier orders the store to info->done vs subsequent
		 * H_PRODs to wake the other CPUs. It pairs with the barrier
		 * in the H_SUCCESS case above.
		 */
		smp_mb();
		prod_others();
	}
	/*
	 * Execution may have been suspended for several seconds, so
	 * reset the watchdog.
	 */
	touch_nmi_watchdog();
	return ret;
}

/*
 * Abort reason code byte 0. We use only the 'Migrating partition' value.
 */
enum vasi_aborting_entity {
	ORCHESTRATOR = 1,
	VSP_SOURCE = 2,
	PARTITION_FIRMWARE = 3,
	PLATFORM_FIRMWARE = 4,
	VSP_TARGET = 5,
	MIGRATING_PARTITION = 6,
};

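/*
 * Ask the platform to abandon the migration. The reason code passed
 * to H_VASI_SIGNAL carries the aborting entity in its high byte and
 * the (truncated) errno value in the low 24 bits.
 */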
static void pseries_cancel_migration(u64 handle, int err)
{
	u32 reason_code;
	u32 detail;
	u8 entity;
	long hvrc;

	entity = MIGRATING_PARTITION;
	detail = abs(err) & 0xffffff;
	reason_code = (entity << 24) | detail;

	hvrc = plpar_hcall_norets(H_VASI_SIGNAL, handle,
				  H_VASI_SIGNAL_CANCEL, reason_code);
	if (hvrc)
		pr_err("H_VASI_SIGNAL error: %ld\n", hvrc);
}

static int pseries_suspend(u64 handle)
{
	const unsigned int max_attempts = 5;
	unsigned int retry_interval_ms = 1;
	unsigned int attempt = 1;
	int ret;

	while (true) {
		struct pseries_suspend_info info;
		unsigned long vasi_state;
		int vasi_err;

		info = (struct pseries_suspend_info) {
			.counter = ATOMIC_INIT(0),
			.done = false,
		};

		ret = stop_machine(do_join, &info, cpu_online_mask);
		if (ret == 0)
			break;
		/*
		 * Encountered an error. If the VASI stream is still
		 * in Suspending state, it's likely a transient
		 * condition related to some device in the partition
		 * and we can retry in the hope that the cause has
		 * cleared after some delay.
		 *
		 * A better design would allow drivers etc to prepare
		 * for the suspend and avoid conditions which prevent
		 * the suspend from succeeding. For now, we have this
		 * mitigation.
		 */
		pr_notice("Partition suspend attempt %u of %u error: %d\n",
			  attempt, max_attempts, ret);

		if (attempt == max_attempts)
			break;

		vasi_err = poll_vasi_state(handle, &vasi_state);
		if (vasi_err == 0) {
			if (vasi_state != H_VASI_SUSPENDING) {
				pr_notice("VASI state %lu after failed suspend\n",
					  vasi_state);
				break;
			}
		} else if (vasi_err != -EOPNOTSUPP) {
			pr_err("VASI state poll error: %d\n", vasi_err);
			break;
		}

		pr_notice("Will retry partition suspend after %u ms\n",
			  retry_interval_ms);

		msleep(retry_interval_ms);
		retry_interval_ms *= 10;
		attempt++;
	}

	return ret;
}

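/*
 * Top-level migration sequence: wait for the VASI session to reach
 * the suspending state, suspend the partition, then either perform
 * the post-resume fixups or cancel the migration on error.
 */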
static int pseries_migrate_partition(u64 handle)
{
	int ret;

	ret = wait_for_vasi_session_suspending(handle);
	if (ret)
		return ret;

	ret = pseries_suspend(handle);
	if (ret == 0)
		post_mobility_fixup();
	else
		pseries_cancel_migration(handle, ret);

	return ret;
}

int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
{
	return pseries_migrate_partition(handle);
}

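/*
 * sysfs entry point for a user-initiated migration: drmgr writes the
 * stream id of the migration session to /sys/kernel/mobility/migration.
 */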
static ssize_t migration_store(struct class *class,
			       struct class_attribute *attr, const char *buf,
			       size_t count)
{
	u64 streamid;
	int rc;

	rc = kstrtou64(buf, 0, &streamid);
	if (rc)
		return rc;

	rc = pseries_migrate_partition(streamid);
	if (rc)
		return rc;

	return count;
}

/*
 * Used by drmgr to determine the kernel behavior of the migration interface.
 *
 * Version 1: Performs all PAPR requirements for migration including
 * firmware activation and device tree update.
 */
#define MIGRATION_API_VERSION	1

static CLASS_ATTR_WO(migration);
static CLASS_ATTR_STRING(api_version, 0444, __stringify(MIGRATION_API_VERSION));

static int __init mobility_sysfs_init(void)
{
	int rc;

	mobility_kobj = kobject_create_and_add("mobility", kernel_kobj);
	if (!mobility_kobj)
		return -ENOMEM;

	rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
	if (rc)
		pr_err("unable to create migration sysfs file (%d)\n", rc);

	rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
	if (rc)
		pr_err("unable to create api_version sysfs file (%d)\n", rc);

	return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);