// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dma_port.h"
#include "tb_regs.h"

#define DMA_PORT_CAP			0x3e

#define MAIL_DATA			1
#define MAIL_DATA_DWORDS		16

#define MAIL_IN				17
#define MAIL_IN_CMD_SHIFT		28
#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE		0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
#define MAIL_IN_CMD_FLASH_READ		0x2
#define MAIL_IN_CMD_POWER_CYCLE		0x4
#define MAIL_IN_DWORDS_SHIFT		24
#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT		2
#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
#define MAIL_IN_CSS			BIT(1)
#define MAIL_IN_OP_REQUEST		BIT(0)

#define MAIL_OUT			18
#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT	4
#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED	0
#define MAIL_OUT_STATUS_ERR_AUTH	1
#define MAIL_OUT_STATUS_ERR_ACCESS	2

#define DMA_PORT_TIMEOUT		5000 /* ms */
#define DMA_PORT_RETRIES		3

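/*
 * Overview of the mailbox flow implemented below: payload (if any) is
 * placed in the MAIL_DATA dwords, a command is written to MAIL_IN with
 * MAIL_IN_OP_REQUEST set, MAIL_IN is polled until the request bit
 * clears, and the result is then read back from MAIL_OUT.
 */
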
/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};

/*
 * When the switch is in safe mode it supports very little functionality
 * so we don't validate that much here.
 */
static bool dma_port_match(const struct tb_cfg_request *req,
			   const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;
	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	return true;
}

static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}

static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}

static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}

static int dma_find_port(struct tb_switch *sw)
{
	static const int ports[] = { 3, 5, 7 };
	int i;

	/*
	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
	 * controller. Try all of them.
	 */
	for (i = 0; i < ARRAY_SIZE(ports); i++) {
		u32 type;
		int ret;

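		/*
		 * Read dword 2 of the port configuration space; its low
		 * 24 bits hold the adapter type.
		 */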
		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
				    2, 1, DMA_PORT_TIMEOUT);
		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
			return ports[i];
	}

	return -ENODEV;
}

/**
 * dma_port_alloc() - Finds DMA control port from a switch pointed by route
 * @sw: Switch from which to find the DMA port
 *
 * Function checks if the switch NHI port supports DMA configuration
 * based mailbox capability and if it does, allocates and initializes
 * DMA port structure. Returns %NULL if the capability was not found.
 *
 * The DMA control port is functional also when the switch is in safe
 * mode.
 */
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
	struct tb_dma_port *dma;
	int port;

	port = dma_find_port(sw);
	if (port < 0)
		return NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
	if (!dma->buf) {
		kfree(dma);
		return NULL;
	}

	dma->sw = sw;
	dma->port = port;
	dma->base = DMA_PORT_CAP;

	return dma;
}

/**
 * dma_port_free() - Release DMA control port structure
 * @dma: DMA control port
 */
void dma_port_free(struct tb_dma_port *dma)
{
	if (dma) {
		kfree(dma->buf);
		kfree(dma);
	}
}

static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int status_to_errno(u32 status)
{
	switch (status & MAIL_OUT_STATUS_MASK) {
	case MAIL_OUT_STATUS_COMPLETED:
		return 0;
	case MAIL_OUT_STATUS_ERR_AUTH:
		return -EINVAL;
	case MAIL_OUT_STATUS_ERR_ACCESS:
		return -EACCES;
	}

	return -EIO;
}

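/*
 * Issues a single mailbox command: writes @in to MAIL_IN, waits for the
 * operation request bit to clear and converts the MAIL_OUT status into
 * an errno.
 */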
static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}

static int dma_port_flash_read_block(void *data, unsigned int dwaddress,
				     void *buf, size_t dwords)
{
	struct tb_dma_port *dma = data;
	struct tb_switch *sw = dma->sw;
	int ret;
	u32 in;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
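	/*
	 * The 4-bit dwords field cannot hold MAIL_DATA_DWORDS (16), so a
	 * full block transfer leaves it at zero (presumably treated as
	 * the maximum by the firmware).
	 */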
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}

static int dma_port_flash_write_block(void *data, unsigned int dwaddress,
				      const void *buf, size_t dwords)
{
	struct tb_dma_port *dma = data;
	struct tb_switch *sw = dma->sw;
	int ret;
	u32 in;

	/* Write the block to MAIL_DATA registers */
	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;

	/* CSS header write is always done to the same magic address */
	if (dwaddress >= DMA_PORT_CSS_ADDRESS)
		in |= MAIL_IN_CSS;

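	/* Note that unlike the read command, the length here is dwords - 1 */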
	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}

/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read
 * @size: Size of the buffer
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size)
{
	return tb_nvm_read_data(address, buf, size, DMA_PORT_RETRIES,
				dma_port_flash_read_block, dma);
}

/**
 * dma_port_flash_write() - Write to non-active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of non-active region
 * @buf: Data to write
 * @size: Size of the buffer
 *
 * Writes a block of data to the non-active flash region of the switch.
 * If the address is given as %DMA_PORT_CSS_ADDRESS the block is written
 * using the CSS command.
 */
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
			 const void *buf, size_t size)
{
	if (address >= DMA_PORT_CSS_ADDRESS && size > DMA_PORT_CSS_MAX_SIZE)
		return -E2BIG;

	return tb_nvm_write_data(address, buf, size, DMA_PORT_RETRIES,
				 dma_port_flash_write_block, dma);
}

/**
 * dma_port_flash_update_auth() - Starts flash authenticate cycle
 * @dma: DMA control port
 *
 * Starts the flash update authentication cycle. If the image in the
 * non-active area was valid, the switch starts the upgrade process where
 * the active and non-active areas get swapped in the end. The caller
 * should call dma_port_flash_update_auth_status() to get the status of
 * this command. This is because if the switch in question is the root
 * switch, the Thunderbolt host controller gets reset as well.
 */
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}

/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}

/**
 * dma_port_power_cycle() - Power cycles the switch
 * @dma: DMA control port
 *
 * Triggers power cycle to the switch.
 */
int dma_port_power_cycle(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}