/*
 * Thunderbolt Cactus Ridge driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"

#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - thunderbolt control channel
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	event_cb callback;
	void *callback_data;
};

#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so when you are done with it call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}

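/*
 * Illustrative sketch (not code used by this driver): the expected
 * lifetime of a request from a caller's point of view. Names are
 * placeholders and error handling is trimmed.
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *
 *	if (!req)
 *		return -ENOMEM;
 *	... fill in the request/response fields, then submit it with
 *	    tb_cfg_request_sync() or tb_cfg_request() ...
 *	tb_cfg_request_put(req);
 */
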
static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req;
	bool found = false;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(req);
		if (req->match(req, pkg)) {
			found = true;
			break;
		}
		tb_cfg_request_put(req);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return found ? req : NULL;
}

/* utility functions */

static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
			"wrong route (expected %llx, got %llx)",
			route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };
	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
	WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
	WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_WARN(ctl,
			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}

static void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
	int i;
	for (i = 0; i < len; i++)
		dst[i] = cpu_to_be32(src[i]);
}

static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
{
	int i;
	for (i = 0; i < len; i++)
		dst[i] = be32_to_cpu(src[i]);
}

static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

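/*
 * Note on the wire format used below: tb_ctl_tx() sends the payload as
 * big-endian 32-bit words followed by a 4-byte CRC32C of that payload
 * (computed by tb_crc() above), and tb_ctl_rx_callback() verifies and
 * strips the same trailing checksum on receive.
 */
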
static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}

/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	tb_ctl_pkg_free(pkg);
}

/**
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;
	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}

/**
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static void tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	ring_rx(pkg->ctl->rx, &pkg->frame); /*
					     * We ignore failures during stop.
					     * All rx packets are referenced
					     * from ctl->rx_packets, so we do
					     * not lose them.
					     */
}

static bool tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size);
		goto rx;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what that request expects.
	 * This prevents packets such as replies coming after the timeout
	 * has triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request without waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}

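/*
 * Illustrative sketch (hypothetical caller, not code from this file):
 * using tb_cfg_request() asynchronously with a caller supplied
 * completion callback. The callback runs from the request work item
 * once a matching response has been copied (or right after transmit if
 * the request has no response buffer) and is skipped if the request is
 * canceled. The caller still owns the reference it got from
 * tb_cfg_request_alloc() and drops it with tb_cfg_request_put() once it
 * is done with req->result.
 *
 *	static void my_done(void *data)
 *	{
 *		struct my_ctx *ctx = data;	(placeholder context type)
 *
 *		complete(&ctx->done);		or inspect ctx->req->result
 *	}
 *
 *	ret = tb_cfg_request(ctl, req, my_done, ctx);
 *	if (ret)
 *		tb_cfg_request_put(req);
 */
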
/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is no longer active.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in milliseconds to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return NULL;
	ctl->nhi = nhi;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_info(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}

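/*
 * Typical lifecycle of a control channel, pieced together from the
 * kernel-doc comments in this section (illustrative sketch; my_event_cb
 * and my_data are placeholder names):
 *
 *	ctl = tb_ctl_alloc(nhi, my_event_cb, my_data);
 *	tb_ctl_start(ctl);
 *	... issue tb_cfg_read()/tb_cfg_write()/tb_cfg_reset() ...
 *	tb_ctl_stop(ctl);	must not be called from my_event_cb
 *	tb_ctl_free(ctl);	only valid after tb_ctl_stop()
 */
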
/**
 * tb_ctl_free() - free a control channel
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		ring_free(ctl->rx);
	if (ctl->tx)
		ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	if (ctl->frame_pool)
		dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;
	tb_ctl_info(ctl, "control channel starting...\n");
	ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	ring_stop(ctl->rx);
	ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_info(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_error() - send error packet
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
		 enum tb_cfg_error error)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = error,
	};
	tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
				  int timeout_msec)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		tb_cfg_print_error(ctl, &res);
		return -EIO;

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
			    space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		tb_cfg_print_error(ctl, &res);
		return -EIO;

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
			    space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

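/*
 * Example (illustrative sketch, not code from this driver): reading a
 * single dword from the start of a switch config space with the wrapper
 * above; ctl and route are assumed to come from the caller.
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_cfg_read(ctl, &val, route, 0, TB_CFG_SWITCH, 0, 1);
 *	if (ret)
 *		return ret;
 *
 * As with tb_cfg_read_raw(), offset and length are in dwords.
 */
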
/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}