// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		1

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	event_cb callback;
	void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so call tb_cfg_request_put() when you
 * are done with it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}

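/*
 * A minimal sketch of the request lifecycle built on these helpers
 * (illustrative only, not part of the driver): the allocation
 * reference is dropped with a final tb_cfg_request_put() when the
 * caller is done.
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *
 *	if (req) {
 *		... fill in request/response fields and submit ...
 *		tb_cfg_request_put(req);
 *	}
 */
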
static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req;
	bool found = false;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(req);
		if (req->match(req, pkg)) {
			found = true;
			break;
		}
		tb_cfg_request_put(req);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return found ? req : NULL;
}

/* utility functions */

static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
			"wrong route (expected %llx, got %llx)\n",
			route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_ctl *ctl = response->ctl;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	if (pkg->zero1)
		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
	if (pkg->zero2)
		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
	if (pkg->zero3)
		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}

static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
Mika Westerberg16a12582017-06-06 15:24:53 +0300358static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
Andreas Noeverf25bf6f2014-06-03 22:03:59 +0200359 enum tb_cfg_pkg_type type)
360{
361 int res;
362 struct ctl_pkg *pkg;
363 if (len % 4 != 0) { /* required for le->be conversion */
364 tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
365 return -EINVAL;
366 }
367 if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
368 tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
369 len, TB_FRAME_SIZE - 4);
370 return -EINVAL;
371 }
372 pkg = tb_ctl_pkg_alloc(ctl);
373 if (!pkg)
374 return -ENOMEM;
375 pkg->frame.callback = tb_ctl_tx_callback;
376 pkg->frame.size = len + 4;
377 pkg->frame.sof = type;
378 pkg->frame.eof = type;
379 cpu_to_be32_array(pkg->buffer, data, len / 4);
Andreas Noever801dba52014-06-20 21:42:22 +0200380 *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
Andreas Noeverf25bf6f2014-06-03 22:03:59 +0200381
Mika Westerberg3b3d9f42017-10-02 13:38:37 +0300382 res = tb_ring_tx(ctl->tx, &pkg->frame);
Andreas Noeverf25bf6f2014-06-03 22:03:59 +0200383 if (res) /* ring is stopped */
384 tb_ctl_pkg_free(pkg);
385 return res;
386}
387
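/*
 * For reference, a control frame built by tb_ctl_tx() above ends up on
 * the wire laid out as below (payload dwords converted to big endian,
 * inverted CRC32c appended); a sketch based on the code above, not a
 * definitive wire-format specification:
 *
 *	+--------------------------+------------------------+
 *	| len bytes of payload     | 4 bytes of ~crc32c     |
 *	+--------------------------+------------------------+
 *	 <------ pkg->frame.size = len + 4 bytes ----------->
 */
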
/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
}

static bool tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}

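/*
 * A sketch of how a synchronous request is issued (this mirrors what
 * tb_cfg_read_raw() below does; error handling elided):
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *
 *	req->match = tb_cfg_match;
 *	req->copy = tb_cfg_copy;
 *	req->request = &request;
 *	req->request_size = sizeof(request);
 *	req->request_type = TB_CFG_PKG_READ;
 *	req->response = &reply;
 *	req->response_size = sizeof(reply);
 *	req->response_type = TB_CFG_PKG_READ;
 *
 *	res = tb_cfg_request_sync(ctl, req, TB_CFG_DEFAULT_TIMEOUT);
 *	tb_cfg_request_put(req);
 */
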
/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;
	ctl->nhi = nhi;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}

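/*
 * The expected lifecycle, as driven by the caller (sketch only; the
 * event callback name is illustrative):
 *
 *	ctl = tb_ctl_alloc(nhi, my_event_cb, cb_data);
 *	if (!ctl)
 *		return NULL;
 *	tb_ctl_start(ctl);
 *	...
 *	tb_ctl_stop(ctl);
 *	tb_ctl_free(ctl);
 */
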
/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop().
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this in response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

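/*
 * Example (sketch): a connection manager would typically ack a hotplug
 * event from its event callback before handling it, based on the
 * fields of the received struct cfg_event_pkg:
 *
 *	const struct cfg_event_pkg *pkg = buf;
 *	u64 route = tb_cfg_get_route(&pkg->header);
 *
 *	tb_cfg_ack_plug(ctl, route, pkg->port, pkg->unplug);
 */
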
static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, TB_CFG_DEFAULT_TIMEOUT);

	tb_cfg_request_put(req);

	return res;
}

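/*
 * Example (sketch): per the note above, a caller should treat a
 * missing reply as a timeout and reconfigure the switch before
 * trying again:
 *
 *	struct tb_cfg_result res = tb_cfg_reset(ctl, route);
 *
 *	if (res.err == -ETIMEDOUT)
 *		... switch may be misconfigured, fix it up and retry ...
 */
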
/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

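/*
 * Example (sketch): reading two dwords from a port config space and
 * telling Thunderbolt errors (res.err == 1, details in res.tb_error)
 * apart from transport errors (res.err < 0):
 *
 *	u32 data[2];
 *	struct tb_cfg_result res;
 *
 *	res = tb_cfg_read_raw(ctl, data, route, port, TB_CFG_PORT,
 *			      offset, 2, TB_CFG_DEFAULT_TIMEOUT);
 *	if (res.err == 1)
 *		... inspect res.tb_error ...
 *	else if (res.err)
 *		... negative errno, e.g. -ETIMEDOUT ...
 */
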
/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

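/*
 * Example (sketch): a typical read-modify-write using these wrappers,
 * which translate Thunderbolt errors into negative errnos (the bit
 * being set is illustrative):
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_cfg_read(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
 *	if (ret)
 *		return ret;
 *	val |= BIT(0);
 *	ret = tb_cfg_write(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
 */
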
/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   TB_CFG_DEFAULT_TIMEOUT);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}