// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Reset Protocol
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include "common.h"

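/*
 * Reset protocol message IDs from the SCMI v2.0 Reset Management protocol.
 * Only RESET_DOMAIN_ATTRIBUTES and RESET are issued from this file; the
 * notification-related definitions below are decoded and cached but no
 * RESET_NOTIFY subscription is made here.
 */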
enum scmi_reset_protocol_cmd {
        RESET_DOMAIN_ATTRIBUTES = 0x3,
        RESET = 0x4,
        RESET_NOTIFY = 0x5,
};

enum scmi_reset_protocol_notify {
        RESET_ISSUED = 0x0,
};

#define NUM_RESET_DOMAIN_MASK	0xffff
#define RESET_NOTIFY_ENABLE	BIT(0)

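/*
 * Response payload of RESET_DOMAIN_ATTRIBUTES: an attributes bitfield with
 * the async/notify capabilities in the top bits, the reset latency in
 * microseconds and the NUL-terminated domain name.
 */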
struct scmi_msg_resp_reset_domain_attributes {
        __le32 attributes;
#define SUPPORTS_ASYNC_RESET(x)		((x) & BIT(31))
#define SUPPORTS_NOTIFY_RESET(x)	((x) & BIT(30))
        __le32 latency;
        u8 name[SCMI_MAX_STR_SIZE];
};

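/*
 * Request payload of the RESET command: the target domain, the flags
 * selecting autonomous/explicit-assert/asynchronous behaviour, and the
 * reset state to apply (architectural cold reset in this driver).
 */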
struct scmi_msg_reset_domain_reset {
        __le32 domain_id;
        __le32 flags;
#define AUTONOMOUS_RESET	BIT(0)
#define EXPLICIT_RESET_ASSERT	BIT(1)
#define ASYNCHRONOUS_RESET	BIT(2)
        __le32 reset_state;
#define ARCH_RESET_TYPE		BIT(31)
#define COLD_RESET_STATE	BIT(0)
#define ARCH_COLD_RESET		(ARCH_RESET_TYPE | COLD_RESET_STATE)
};

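/*
 * Per-domain information cached at initialisation time from the
 * RESET_DOMAIN_ATTRIBUTES response, so later queries are served from
 * memory instead of going back to the platform firmware.
 */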
struct reset_dom_info {
        bool async_reset;
        bool reset_notify;
        u32 latency_us;
        char name[SCMI_MAX_STR_SIZE];
};

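/* Protocol-private data stashed in the SCMI handle as handle->reset_priv */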
struct scmi_reset_info {
        u32 version;
        int num_domains;
        struct reset_dom_info *dom_info;
};

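/*
 * Issue PROTOCOL_ATTRIBUTES to discover how many reset domains the platform
 * exposes; the count is carried in the lower 16 bits of the returned
 * attributes word.
 */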
static int scmi_reset_attributes_get(const struct scmi_handle *handle,
                                     struct scmi_reset_info *pi)
{
        int ret;
        struct scmi_xfer *t;
        u32 attr;

        ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
                                 SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
        if (ret)
                return ret;

        ret = scmi_do_xfer(handle, t);
        if (!ret) {
                attr = get_unaligned_le32(t->rx.buf);
                pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
        }

        scmi_xfer_put(handle, t);
        return ret;
}

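/*
 * Query RESET_DOMAIN_ATTRIBUTES for one domain and fill in the cached
 * reset_dom_info. A latency of U32_MAX means the platform does not report
 * a latency for this domain, so it is treated as zero.
 */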
static int
scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
                                 struct reset_dom_info *dom_info)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_reset_domain_attributes *attr;

        ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES,
                                 SCMI_PROTOCOL_RESET, sizeof(domain),
                                 sizeof(*attr), &t);
        if (ret)
                return ret;

        put_unaligned_le32(domain, t->tx.buf);
        attr = t->rx.buf;

        ret = scmi_do_xfer(handle, t);
        if (!ret) {
                u32 attributes = le32_to_cpu(attr->attributes);

                dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
                dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
                dom_info->latency_us = le32_to_cpu(attr->latency);
                if (dom_info->latency_us == U32_MAX)
                        dom_info->latency_us = 0;
                strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
        }

        scmi_xfer_put(handle, t);
        return ret;
}

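/* Trivial accessors over the cached per-protocol and per-domain data */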
static int scmi_reset_num_domains_get(const struct scmi_handle *handle)
{
        struct scmi_reset_info *pi = handle->reset_priv;

        return pi->num_domains;
}

static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain)
{
        struct scmi_reset_info *pi = handle->reset_priv;
        struct reset_dom_info *dom = pi->dom_info + domain;

        return dom->name;
}

static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain)
{
        struct scmi_reset_info *pi = handle->reset_priv;
        struct reset_dom_info *dom = pi->dom_info + domain;

        return dom->latency_us;
}

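/*
 * Common helper behind all reset operations: build a RESET request for the
 * given domain with the requested flags and reset state. If the domain
 * supports asynchronous resets, the ASYNCHRONOUS_RESET flag is forced and
 * the transfer waits for the delayed response instead of the immediate one.
 */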
static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
                             u32 flags, u32 state)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_reset_domain_reset *dom;
        struct scmi_reset_info *pi = handle->reset_priv;
        struct reset_dom_info *rdom = pi->dom_info + domain;

        if (rdom->async_reset)
                flags |= ASYNCHRONOUS_RESET;

        ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET,
                                 sizeof(*dom), 0, &t);
        if (ret)
                return ret;

        dom = t->tx.buf;
        dom->domain_id = cpu_to_le32(domain);
        dom->flags = cpu_to_le32(flags);
        dom->reset_state = cpu_to_le32(state);

        if (rdom->async_reset)
                ret = scmi_do_xfer_with_response(handle, t);
        else
                ret = scmi_do_xfer(handle, t);

        scmi_xfer_put(handle, t);
        return ret;
}

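/*
 * The three operations exposed to reset consumers all map onto the same
 * helper: a full autonomous reset cycle, an explicit assert, and a
 * de-assert (no flags), each using the architectural cold reset state.
 */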
static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain)
{
        return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET,
                                 ARCH_COLD_RESET);
}

static int
scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain)
{
        return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT,
                                 ARCH_COLD_RESET);
}

static int
scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
{
        return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
}

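/*
 * Operations published through handle->reset_ops for users of the SCMI
 * handle, e.g. the SCMI reset controller driver.
 */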
static struct scmi_reset_ops reset_ops = {
        .num_domains_get = scmi_reset_num_domains_get,
        .name_get = scmi_reset_name_get,
        .latency_get = scmi_reset_latency_get,
        .reset = scmi_reset_domain_reset,
        .assert = scmi_reset_domain_assert,
        .deassert = scmi_reset_domain_deassert,
};

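/*
 * Protocol initialisation: read the protocol version, allocate the private
 * info, discover the number of reset domains and cache each domain's
 * attributes, then hook the ops and private data into the handle.
 */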
static int scmi_reset_protocol_init(struct scmi_handle *handle)
{
        int domain;
        u32 version;
        struct scmi_reset_info *pinfo;

        scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version);

        dev_dbg(handle->dev, "Reset Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

        pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
        if (!pinfo)
                return -ENOMEM;

        scmi_reset_attributes_get(handle, pinfo);

        pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
                                       sizeof(*pinfo->dom_info), GFP_KERNEL);
        if (!pinfo->dom_info)
                return -ENOMEM;

        for (domain = 0; domain < pinfo->num_domains; domain++) {
                struct reset_dom_info *dom = pinfo->dom_info + domain;

                scmi_reset_domain_attributes_get(handle, domain, dom);
        }

        pinfo->version = version;
        handle->reset_ops = &reset_ops;
        handle->reset_priv = pinfo;

        return 0;
}

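/*
 * Register this protocol's init routine with the SCMI core; done at
 * subsys initcall time so the protocol is known before the SCMI platform
 * driver probes.
 */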
static int __init scmi_reset_init(void)
{
        return scmi_protocol_register(SCMI_PROTOCOL_RESET,
                                      &scmi_reset_protocol_init);
}
subsys_initcall(scmi_reset_init);