// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

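/*
 * Copy up to 'size' bytes of buffered write data from the port's write_fifo
 * into 'packet'; returns the number of bytes actually copied.
 */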
static unsigned int
dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
{
	unsigned int len;

	len = kfifo_len(&port->write_fifo);
	if (len < size)
		size = len;
	if (size != 0)
		size = kfifo_out(&port->write_fifo, packet, size);
	return size;
}

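/*
 * Drain the write_fifo into free write requests and queue them on the DbC.
 * Called with port_lock held; the lock is dropped around dbc_ep_queue().
 * Wakes up the tty if any data was handed off.
 */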
static int dbc_start_tx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	int len;
	struct dbc_request *req;
	int status = 0;
	bool do_tty_wake = false;
	struct list_head *pool = &port->write_pool;

	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct dbc_request, list_pool);
		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
		if (len == 0)
			break;
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list_pool);

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);

	return status;
}

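/*
 * Queue all free read requests on the DbC so incoming data can be received.
 * Called with port_lock held; the lock is dropped around dbc_ep_queue().
 */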
static void dbc_start_rx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	struct dbc_request *req;
	int status;
	struct list_head *pool = &port->read_pool;

	while (!list_empty(pool)) {
		if (!port->port.tty)
			break;

		req = list_entry(pool->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		req->length = DBC_MAX_PACKET;

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}
}

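/*
 * Completion handler for read requests: park the completed request on
 * read_queue and let the RX tasklet push its data into the tty layer.
 */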
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long flags;
	struct dbc_port *port = &dbc->port;

	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list_pool, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

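/*
 * Completion handler for write requests: return the request to write_pool
 * and, on success, restart transmission of any remaining buffered data.
 */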
static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long flags;
	struct dbc_port *port = &dbc->port;

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list_pool, &port->write_pool);
	switch (req->status) {
	case 0:
		dbc_start_tx(port);
		break;
	case -ESHUTDOWN:
		break;
	default:
		dev_warn(dbc->dev, "unexpected write complete status %d\n",
			 req->status);
		break;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void xhci_dbc_free_req(struct dbc_request *req)
{
	kfree(req->buf);
	dbc_free_request(req);
}

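/*
 * Pre-allocate up to DBC_QUEUE_SIZE requests, each with a DBC_MAX_PACKET
 * buffer and the given completion handler. Fails with -ENOMEM only if not
 * a single request could be allocated.
 */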
static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
			struct list_head *head,
			void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
	int i;
	struct dbc_request *req;

	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
		if (!req)
			break;

		req->length = DBC_MAX_PACKET;
		req->buf = kmalloc(req->length, GFP_KERNEL);
		if (!req->buf) {
			dbc_free_request(req);
			break;
		}

		req->complete = fn;
		list_add_tail(&req->list_pool, head);
	}

	return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct list_head *head)
{
	struct dbc_request *req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		xhci_dbc_free_req(req);
	}
}

static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct dbc_port *port = driver->driver_state;

	tty->driver_data = port;

	return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dbc_port *port = tty->driver_data;

	return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dbc_port *port = tty->driver_data;

	tty_port_close(&port->port, tty, file);
}

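/*
 * tty write path: buffer the data in the write_fifo and start transmission.
 * Returns the number of bytes actually accepted into the fifo.
 */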
static int dbc_tty_write(struct tty_struct *tty,
			 const unsigned char *buf,
			 int count)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = kfifo_in(&port->write_fifo, buf, count);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->write_fifo, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int dbc_tty_write_room(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	int room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	room = kfifo_avail(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return room;
}

static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	int chars = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
	.install		= dbc_tty_install,
	.open			= dbc_tty_open,
	.close			= dbc_tty_close,
	.write			= dbc_tty_write,
	.put_char		= dbc_tty_put_char,
	.flush_chars		= dbc_tty_flush_chars,
	.write_room		= dbc_tty_write_room,
	.chars_in_buffer	= dbc_tty_chars_in_buffer,
	.unthrottle		= dbc_tty_unthrottle,
};

static struct tty_driver *dbc_tty_driver;

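/*
 * Allocate and register the single-port "ttyDBC" driver; the device node
 * shows up as /dev/ttyDBC0. Default termios is 9600 baud, 8 data bits
 * (the line speed is nominal, since DbC data actually travels over USB).
 */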
int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
{
	int status;
	struct xhci_dbc *dbc = xhci->dbc;

	dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(dbc_tty_driver)) {
		status = PTR_ERR(dbc_tty_driver);
		dbc_tty_driver = NULL;
		return status;
	}

	dbc_tty_driver->driver_name = "dbc_serial";
	dbc_tty_driver->name = "ttyDBC";

	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	dbc_tty_driver->init_termios = tty_std_termios;
	dbc_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	dbc_tty_driver->init_termios.c_ispeed = 9600;
	dbc_tty_driver->init_termios.c_ospeed = 9600;
	dbc_tty_driver->driver_state = &dbc->port;

	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

	status = tty_register_driver(dbc_tty_driver);
	if (status) {
		xhci_err(xhci,
			 "can't register dbc tty driver, err %d\n", status);
		put_tty_driver(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}

	return status;
}

void xhci_dbc_tty_unregister_driver(void)
{
	if (dbc_tty_driver) {
		tty_unregister_driver(dbc_tty_driver);
		put_tty_driver(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}
}

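/*
 * RX tasklet: move data from completed read requests into the tty flip
 * buffer. Honors tty throttling, remembers partially-consumed buffers via
 * port->n_read, and re-queues requests for further reception unless the
 * DbC signalled -ESHUTDOWN.
 */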
static void dbc_rx_push(unsigned long _port)
{
	struct dbc_request *req;
	struct tty_struct *tty;
	unsigned long flags;
	bool do_push = false;
	bool disconnect = false;
	struct dbc_port *port = (void *)_port;
	struct list_head *queue = &port->read_queue;

	spin_lock_irqsave(&port->port_lock, flags);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		req = list_first_entry(queue, struct dbc_request, list_pool);

		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case 0:
			break;
		case -ESHUTDOWN:
			disconnect = true;
			break;
		default:
			pr_warn("ttyDBC0: unexpected RX status %d\n",
				req->status);
			break;
		}

		if (req->actual) {
			char *packet = req->buf;
			unsigned int n, size = req->actual;
			int count;

			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
						       size);
			if (count)
				do_push = true;
			if (count != size) {
				port->n_read += count;
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list_pool, &port->read_pool);
	}

	if (do_push)
		tty_flip_buffer_push(&port->port);

	if (!list_empty(queue) && tty) {
		if (!tty_throttled(tty)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warn("ttyDBC0: RX not scheduled?\n");
		}
	}

	if (!disconnect)
		dbc_start_rx(port);

	spin_unlock_irqrestore(&port->port_lock, flags);
}

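/* tty_port activate hook: start reception as soon as the port is opened. */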
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
	unsigned long flags;
	struct dbc_port *port = container_of(_port, struct dbc_port, port);

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_rx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return 0;
}

static const struct tty_port_operations dbc_port_ops = {
	.activate = dbc_port_activate,
};

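/*
 * Initialize the tty_port, its lock, the RX push tasklet and the request
 * lists, and look up the DbC bulk endpoints used by this port.
 */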
static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->in = get_in_ep(dbc);
	port->out = get_out_ep(dbc);
	port->port.ops = &dbc_port_ops;
	port->n_read = 0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
	tasklet_kill(&port->push);
	tty_port_destroy(&port->port);
}

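/*
 * Register the DbC tty port: set up the port, register device node 0,
 * and allocate the write fifo and the read/write request pools. All
 * previous steps are unwound on failure.
 */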
int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
	int ret;
	struct device *tty_dev;
	struct dbc_port *port = &dbc->port;

	xhci_dbc_tty_init_port(dbc, port);
	tty_dev = tty_port_register_device(&port->port,
					   dbc_tty_driver, 0, NULL);
	if (IS_ERR(tty_dev)) {
		ret = PTR_ERR(tty_dev);
		goto register_fail;
	}

	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
	if (ret)
		goto buf_alloc_fail;

	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
				      dbc_read_complete);
	if (ret)
		goto request_fail;

	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
				      dbc_write_complete);
	if (ret)
		goto request_fail;

	port->registered = true;

	return 0;

request_fail:
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->write_pool);
	kfifo_free(&port->write_fifo);

buf_alloc_fail:
	tty_unregister_device(dbc_tty_driver, 0);

register_fail:
	xhci_dbc_tty_exit_port(port);

	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);

	return ret;
}

void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
	struct dbc_port *port = &dbc->port;

	tty_unregister_device(dbc_tty_driver, 0);
	xhci_dbc_tty_exit_port(port);
	port->registered = false;

	kfifo_free(&port->write_fifo);
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->read_queue);
	xhci_dbc_free_requests(&port->write_pool);
}