// SPDX-License-Identifier: GPL-2.0+
/*
 * u_serial.c - utilities for USB gadget "serial port"/TTY support
 *
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 David Brownell
 * Copyright (C) 2008 by Nokia Corporation
 *
 * This code also borrows from usbserial.c, which is
 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>

#include "u_serial.h"


/*
 * This component encapsulates the TTY layer glue needed to provide basic
 * "serial port" functionality through the USB gadget stack. Each such
 * port is exposed through a /dev/ttyGS* node.
 *
 * After this module has been loaded, individual TTY ports can be requested
 * (gserial_alloc_line()) and they will stay available until they are removed
 * (gserial_free_line()). Each one may be connected to a USB function
 * (gserial_connect), or disconnected (with gserial_disconnect) when the USB
 * host issues a config change event. Data can only flow when the port is
 * connected to the host.
 *
 * A given TTY port can be made available in multiple configurations.
 * For example, each one might expose a ttyGS0 node which provides a
 * login application. In one case that might use CDC ACM interface 0,
 * while another configuration might use interface 3 for that. The
 * work to handle that (including descriptor management) is not part
 * of this component.
 *
 * Configurations may expose more than one TTY port. For example, if
 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
 * for a telephone or fax link. And ttyGS2 might be something that just
 * needs a simple byte stream interface for some messaging protocol that
 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
 *
 *
 * gserial is the lifecycle interface, used by USB functions
 * gs_port is the I/O nexus, used by the tty driver
 * tty_struct links to the tty/filesystem framework
 *
 * gserial <---> gs_port ... links will be null when the USB link is
 *	inactive; managed by gserial_{connect,disconnect}(). each gserial
 *	instance can wrap its own USB control protocol.
 *	gserial->ioport == usb_ep->driver_data ... gs_port
 *	gs_port->port_usb ... gserial
 *
 * gs_port <---> tty_struct ... links will be null when the TTY file
 *	isn't opened; managed by gs_open()/gs_close()
 *	gserial->port_tty ... tty_struct
 *	tty_struct->driver_data ... gserial
 */
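
/*
 * A minimal usage sketch of the lifecycle interface described above, as seen
 * from a USB function driver. The surrounding bind/unbind context and the
 * error handling are illustrative assumptions only; the four gserial_* calls
 * are the entry points actually exported by this file:
 *
 *	static struct gserial my_gser;
 *	static unsigned char my_line;
 *
 *	// at function bind time, claim a ttyGS* line
 *	status = gserial_alloc_line(&my_line);
 *
 *	// when the host activates the configuration, start I/O
 *	status = gserial_connect(&my_gser, my_line);
 *
 *	// on USB disconnect or config change, stop I/O
 *	gserial_disconnect(&my_gser);
 *
 *	// at function unbind time, release the line again
 *	gserial_free_line(my_line);
 */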

/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
 * next layer of buffering. For TX that's a circular buffer; for RX
 * consider it a NOP. A third layer is provided by the TTY code.
 */
#define QUEUE_SIZE		16
#define WRITE_BUF_SIZE		8192	/* TX only */
#define GS_CONSOLE_BUF_SIZE	8192
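
/*
 * Rough buffering arithmetic implied by the limits above (a sketch assuming
 * 512-byte high-speed bulk maxpacket; other speeds differ): the RX side can
 * hold up to QUEUE_SIZE * 512 = 8 KB of completed-but-unpushed data on
 * read_queue, in addition to the endpoint FIFO and the TTY flip buffers,
 * while the TX side buffers at most WRITE_BUF_SIZE = 8 KB ahead of the
 * QUEUE_SIZE requests that may already be queued to the IN endpoint.
 */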

/* console info */
struct gs_console {
	struct console console;
	struct work_struct work;
	spinlock_t lock;
	struct usb_request *req;
	struct kfifo buf;
	size_t missed;
};

/*
 * The port structure holds info for each port, one for each minor number
 * (and thus for each /dev/ node).
 */
struct gs_port {
	struct tty_port port;
	spinlock_t port_lock;	/* guard port_* access */

	struct gserial *port_usb;
#ifdef CONFIG_U_SERIAL_CONSOLE
	struct gs_console *console;
#endif

	u8 port_num;

	struct list_head read_pool;
	int read_started;
	int read_allocated;
	struct list_head read_queue;
	unsigned n_read;
	struct delayed_work push;

	struct list_head write_pool;
	int write_started;
	int write_allocated;
	struct kfifo port_write_buf;
	wait_queue_head_t drain_wait;	/* wait while writes drain */
	bool write_busy;
	wait_queue_head_t close_wait;

	/* REVISIT this state ... */
	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
};

static struct portmaster {
	struct mutex lock;	/* protect open/close */
	struct gs_port *port;
} ports[MAX_U_SERIAL_PORTS];

#define GS_CLOSE_TIMEOUT	15	/* seconds */



#ifdef VERBOSE_DEBUG
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
	pr_debug(fmt, ##arg)
#endif /* pr_vdebug */
#else
#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
	({ if (0) pr_debug(fmt, ##arg); })
#endif /* pr_vdebug */
#endif

/*-------------------------------------------------------------------------*/

/* I/O glue between TTY (upper) and USB function (lower) driver layers */

/*
 * gs_alloc_req
 *
 * Allocate a usb_request and its buffer. Returns a pointer to the
 * usb_request or NULL if there is an error.
 */
struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);

	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			return NULL;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(gs_alloc_req);

/*
 * gs_free_req
 *
 * Free a usb_request and its buffer.
 */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
EXPORT_SYMBOL_GPL(gs_free_req);

/*
 * gs_send_packet
 *
 * If there is data to send, a packet is built in the given
 * buffer and the size is returned. If there is no data to
 * send, 0 is returned.
 *
 * Called with port_lock held.
 */
static unsigned
gs_send_packet(struct gs_port *port, char *packet, unsigned size)
{
	unsigned len;

	len = kfifo_len(&port->port_write_buf);
	if (len < size)
		size = len;
	if (size != 0)
		size = kfifo_out(&port->port_write_buf, packet, size);
	return size;
}

/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send. This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head *pool = &port->write_pool;
	struct usb_ep *in;
	int status = 0;
	bool do_tty_wake = false;

	if (!port->port_usb)
		return status;

	in = port->port_usb->in;

	while (!port->write_busy && !list_empty(pool)) {
		struct usb_request *req;
		int len;

		if (port->write_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, in->maxpacket);
		if (len == 0) {
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);
		req->zero = kfifo_is_empty(&port->port_write_buf);

		pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
			  port->port_num, len, *((u8 *)req->buf),
			  *((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so. Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		port->write_busy = true;
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		port->write_busy = false;

		if (status) {
			pr_debug("%s: %s %s err %d\n",
				 __func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}

		port->write_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);
	return status;
}

/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head *pool = &port->read_pool;
	struct usb_ep *out = port->port_usb->out;

	while (!list_empty(pool)) {
		struct usb_request *req;
		int status;
		struct tty_struct *tty;

		/* no more rx if closed */
		tty = port->port.tty;
		if (!tty)
			break;

		if (port->read_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = out->maxpacket;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
				 __func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		port->read_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}
	return port->read_started;
}

/*
 * RX work takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *work)
{
	struct delayed_work *w = to_delayed_work(work);
	struct gs_port *port = container_of(w, struct gs_port, push);
	struct tty_struct *tty;
	struct list_head *queue = &port->read_queue;
	bool disconnect = false;
	bool do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		struct usb_request *req;

		req = list_first_entry(queue, struct usb_request, list);

		/* leave data queued if tty was rx throttled */
		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug("ttyGS%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warn("ttyGS%d: unexpected RX status %d\n",
				port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual && tty) {
			char *packet = req->buf;
			unsigned size = req->actual;
			unsigned n;
			int count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
					size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug("ttyGS%d: rx block %d/%d\n",
					  port->port_num, count, req->actual);
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; this is handled by a workqueue,
	 * so we won't get callbacks and can hold port_lock
	 */
	if (do_push)
		tty_flip_buffer_push(&port->port);


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here). If we couldn't push any
	 * this time around, RX may be starved, so wait until next jiffy.
	 *
	 * We may leave non-empty queue only when there is a tty, and
	 * either it is throttled or there is no more room in flip buffer.
	 */
	if (!list_empty(queue) && !tty_throttled(tty))
		schedule_delayed_work(&port->push, 1);

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}

static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port *port = ep->driver_data;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock(&port->port_lock);
	list_add_tail(&req->list, &port->read_queue);
	schedule_delayed_work(&port->push, 0);
	spin_unlock(&port->port_lock);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port *port = ep->driver_data;

	spin_lock(&port->port_lock);
	list_add(&req->list, &port->write_pool);
	port->write_started--;

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warn("%s: unexpected %s status %d\n",
			__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion */
		gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock(&port->port_lock);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
			     int *allocated)
{
	struct usb_request *req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
		if (allocated)
			(*allocated)--;
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		void (*fn)(struct usb_ep *, struct usb_request *),
		int *allocated)
{
	int i;
	struct usb_request *req;
	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < n; i++) {
		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
		if (allocated)
			(*allocated)++;
	}
	return 0;
}

/**
 * gs_start_io - start USB I/O streams
 * @port: port to use, which encapsulates the endpoints
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port. If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head *head = &port->read_pool;
	struct usb_ep *ep = port->port_usb->out;
	int status;
	unsigned started;

	/* Allocate RX and TX I/O buffers. We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using. Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, gs_read_complete,
		&port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			gs_write_complete, &port->write_allocated);
	if (status) {
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	if (started) {
		gs_start_tx(port);
		/* Unblock any pending writes into our circular buffer, in case
		 * we didn't in gs_start_tx() */
		tty_wakeup(port->port.tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
			&port->write_allocated);
		status = -EIO;
	}

	return status;
}

/*-------------------------------------------------------------------------*/

/* TTY Driver */

/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int port_num = tty->index;
	struct gs_port *port;
	int status = 0;

	mutex_lock(&ports[port_num].lock);
	port = ports[port_num].port;
	if (!port) {
		status = -ENODEV;
		goto out;
	}

	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (!kfifo_initialized(&port->port_write_buf)) {

		spin_unlock_irq(&port->port_lock);

		/*
		 * portmaster's mutex still protects from simultaneous open(),
		 * and close() can't happen, yet.
		 */

		status = kfifo_alloc(&port->port_write_buf,
				     WRITE_BUF_SIZE, GFP_KERNEL);
		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				 port_num, tty, file);
			goto out;
		}

		spin_lock_irq(&port->port_lock);
	}

	/* already open? Great. */
	if (port->port.count++)
		goto exit_unlock_port;

	tty->driver_data = port;
	port->port.tty = tty;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial *gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
out:
	mutex_unlock(&ports[port_num].lock);
	return status;
}

static int gs_close_flush_done(struct gs_port *p)
{
	int cond;

	/* return true on disconnect or empty buffer or if raced with open() */
	spin_lock_irq(&p->port_lock);
	cond = p->port_usb == NULL || !kfifo_len(&p->port_write_buf) ||
		p->port.count > 1;
	spin_unlock_irq(&p->port_lock);

	return cond;
}

static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial *gser;

	spin_lock_irq(&port->port_lock);

	if (port->port.count != 1) {
raced_with_open:
		if (port->port.count == 0)
			WARN_ON(1);
		else
			--port->port.count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (kfifo_len(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_close_flush_done(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);

		if (port->port.count != 1)
			goto raced_with_open;

		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it. And don't
	 * let the push worker fire again until we're re-opened.
	 */
	if (gser == NULL)
		kfifo_free(&port->port_write_buf);
	else
		kfifo_reset(&port->port_write_buf);

	port->port.count = 0;
	port->port.tty = NULL;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
		 port->port_num, tty, file);

	wake_up(&port->close_wait);
exit:
	spin_unlock_irq(&port->port_lock);
}

static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct gs_port *port = tty->driver_data;
	unsigned long flags;

	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
		  port->port_num, tty, count);

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = kfifo_in(&port->port_write_buf, buf, count);
	/* treat count == 0 as flush_chars() */
	if (port->port_usb)
		gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int gs_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct gs_port *port = tty->driver_data;
	unsigned long flags;
	int status;

	pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %ps\n",
		  port->port_num, tty, ch, __builtin_return_address(0));

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->port_write_buf, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void gs_flush_chars(struct tty_struct *tty)
{
	struct gs_port *port = tty->driver_data;
	unsigned long flags;

	pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_write_room(struct tty_struct *tty)
{
	struct gs_port *port = tty->driver_data;
	unsigned long flags;
	int room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		room = kfifo_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
		  port->port_num, tty, room);

	return room;
}

static int gs_chars_in_buffer(struct tty_struct *tty)
{
	struct gs_port *port = tty->driver_data;
	unsigned long flags;
	int chars = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
		  port->port_num, tty, chars);

	return chars;
}

/* undo side effects of setting TTY_THROTTLED */
static void gs_unthrottle(struct tty_struct *tty)
{
	struct gs_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb) {
		/* Kickstart read queue processing. We don't do xon/xoff,
		 * rts/cts, or other handshaking with the host, but if the
		 * read queue backs up enough we'll be NAKing OUT packets.
		 */
		pr_vdebug("ttyGS%d: unthrottle\n", port->port_num);
		schedule_delayed_work(&port->push, 0);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_break_ctl(struct tty_struct *tty, int duration)
{
	struct gs_port *port = tty->driver_data;
	int status = 0;
	struct gserial *gser;

	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d)\n",
		  port->port_num, duration);

	spin_lock_irq(&port->port_lock);
	gser = port->port_usb;
	if (gser && gser->send_break)
		status = gser->send_break(gser, duration);
	spin_unlock_irq(&port->port_lock);

	return status;
}

static const struct tty_operations gs_tty_ops = {
	.open =			gs_open,
	.close =		gs_close,
	.write =		gs_write,
	.put_char =		gs_put_char,
	.flush_chars =		gs_flush_chars,
	.write_room =		gs_write_room,
	.chars_in_buffer =	gs_chars_in_buffer,
	.unthrottle =		gs_unthrottle,
	.break_ctl =		gs_break_ctl,
};

/*-------------------------------------------------------------------------*/

static struct tty_driver *gs_tty_driver;

#ifdef CONFIG_U_SERIAL_CONSOLE

static void gs_console_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_console *cons = req->context;

	switch (req->status) {
	default:
		pr_warn("%s: unexpected %s status %d\n",
			__func__, ep->name, req->status);
		/* fall through */
	case 0:
		/* normal completion */
		spin_lock(&cons->lock);
		req->length = 0;
		schedule_work(&cons->work);
		spin_unlock(&cons->lock);
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}
}

static void __gs_console_push(struct gs_console *cons)
{
	struct usb_request *req = cons->req;
	struct usb_ep *ep;
	size_t size;

	if (!req)
		return;	/* disconnected */

	if (req->length)
		return;	/* busy */

	ep = cons->console.data;
	size = kfifo_out(&cons->buf, req->buf, ep->maxpacket);
	if (!size)
		return;

	if (cons->missed && ep->maxpacket >= 64) {
		char buf[64];
		size_t len;

		len = sprintf(buf, "\n[missed %zu bytes]\n", cons->missed);
		kfifo_in(&cons->buf, buf, len);
		cons->missed = 0;
	}

	req->length = size;
	if (usb_ep_queue(ep, req, GFP_ATOMIC))
		req->length = 0;
}

static void gs_console_work(struct work_struct *work)
{
	struct gs_console *cons = container_of(work, struct gs_console, work);

	spin_lock_irq(&cons->lock);

	__gs_console_push(cons);

	spin_unlock_irq(&cons->lock);
}

static void gs_console_write(struct console *co,
			     const char *buf, unsigned count)
{
	struct gs_console *cons = container_of(co, struct gs_console, console);
	unsigned long flags;
	size_t n;

	spin_lock_irqsave(&cons->lock, flags);

	n = kfifo_in(&cons->buf, buf, count);
	if (n < count)
		cons->missed += count - n;

	if (cons->req && !cons->req->length)
		schedule_work(&cons->work);

	spin_unlock_irqrestore(&cons->lock, flags);
}

static struct tty_driver *gs_console_device(struct console *co, int *index)
{
	*index = co->index;
	return gs_tty_driver;
}

static int gs_console_connect(struct gs_port *port)
{
	struct gs_console *cons = port->console;
	struct usb_request *req;
	struct usb_ep *ep;

	if (!cons)
		return 0;

	ep = port->port_usb->in;
	req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->complete = gs_console_complete_out;
	req->context = cons;
	req->length = 0;

	spin_lock(&cons->lock);
	cons->req = req;
	cons->console.data = ep;
	spin_unlock(&cons->lock);

	pr_debug("ttyGS%d: console connected!\n", port->port_num);

	schedule_work(&cons->work);

	return 0;
}

static void gs_console_disconnect(struct gs_port *port)
{
	struct gs_console *cons = port->console;
	struct usb_request *req;
	struct usb_ep *ep;

	if (!cons)
		return;

	spin_lock(&cons->lock);

	req = cons->req;
	ep = cons->console.data;
	cons->req = NULL;

	spin_unlock(&cons->lock);

	if (!req)
		return;

	usb_ep_dequeue(ep, req);
	gs_free_req(ep, req);
}

static int gs_console_init(struct gs_port *port)
{
	struct gs_console *cons;
	int err;

	if (port->console)
		return 0;

	cons = kzalloc(sizeof(*port->console), GFP_KERNEL);
	if (!cons)
		return -ENOMEM;

	strcpy(cons->console.name, "ttyGS");
	cons->console.write = gs_console_write;
	cons->console.device = gs_console_device;
	cons->console.flags = CON_PRINTBUFFER;
	cons->console.index = port->port_num;

	INIT_WORK(&cons->work, gs_console_work);
	spin_lock_init(&cons->lock);

	err = kfifo_alloc(&cons->buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
	if (err) {
1024 pr_err("ttyGS%d: allocate console buffer failed\n", port->port_num);
		kfree(cons);
		return err;
	}

	port->console = cons;
	register_console(&cons->console);

	spin_lock_irq(&port->port_lock);
	if (port->port_usb)
		gs_console_connect(port);
	spin_unlock_irq(&port->port_lock);

	return 0;
}

static void gs_console_exit(struct gs_port *port)
{
	struct gs_console *cons = port->console;

	if (!cons)
		return;

	unregister_console(&cons->console);

	spin_lock_irq(&port->port_lock);
	if (cons->req)
		gs_console_disconnect(port);
	spin_unlock_irq(&port->port_lock);

	cancel_work_sync(&cons->work);
	kfifo_free(&cons->buf);
	kfree(cons);
	port->console = NULL;
}

ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count)
{
	struct gs_port *port;
	bool enable;
	int ret;

	ret = strtobool(page, &enable);
	if (ret)
		return ret;

	mutex_lock(&ports[port_num].lock);
	port = ports[port_num].port;

	if (WARN_ON(port == NULL)) {
		ret = -ENXIO;
		goto out;
	}

	if (enable)
		ret = gs_console_init(port);
	else
		gs_console_exit(port);
out:
	mutex_unlock(&ports[port_num].lock);

	return ret < 0 ? ret : count;
}
EXPORT_SYMBOL_GPL(gserial_set_console);

ssize_t gserial_get_console(unsigned char port_num, char *page)
{
	struct gs_port *port;
	ssize_t ret;

	mutex_lock(&ports[port_num].lock);
	port = ports[port_num].port;

	if (WARN_ON(port == NULL))
		ret = -ENXIO;
	else
		ret = sprintf(page, "%u\n", !!port->console);

	mutex_unlock(&ports[port_num].lock);

	return ret;
}
EXPORT_SYMBOL_GPL(gserial_get_console);

#else

static int gs_console_connect(struct gs_port *port)
{
	return 0;
}

static void gs_console_disconnect(struct gs_port *port)
{
}

static int gs_console_init(struct gs_port *port)
{
	return -ENOSYS;
}

static void gs_console_exit(struct gs_port *port)
{
}

#endif

static int
gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
{
	struct gs_port *port;
	int ret = 0;

	mutex_lock(&ports[port_num].lock);
	if (ports[port_num].port) {
		ret = -EBUSY;
		goto out;
	}

	port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
	if (port == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	init_waitqueue_head(&port->drain_wait);
	init_waitqueue_head(&port->close_wait);

	INIT_DELAYED_WORK(&port->push, gs_rx_push);

	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port_num = port_num;
	port->port_line_coding = *coding;

	ports[port_num].port = port;
out:
	mutex_unlock(&ports[port_num].lock);
	return ret;
}

static int gs_closed(struct gs_port *port)
{
	int cond;

	spin_lock_irq(&port->port_lock);
	cond = port->port.count == 0;
	spin_unlock_irq(&port->port_lock);

	return cond;
}

static void gserial_free_port(struct gs_port *port)
{
	cancel_delayed_work_sync(&port->push);
	/* wait for old opens to finish */
	wait_event(port->close_wait, gs_closed(port));
	WARN_ON(port->port_usb != NULL);
	tty_port_destroy(&port->port);
	kfree(port);
}

void gserial_free_line(unsigned char port_num)
{
	struct gs_port *port;

	mutex_lock(&ports[port_num].lock);
	if (WARN_ON(!ports[port_num].port)) {
		mutex_unlock(&ports[port_num].lock);
		return;
	}
	port = ports[port_num].port;
	gs_console_exit(port);
	ports[port_num].port = NULL;
	mutex_unlock(&ports[port_num].lock);

	gserial_free_port(port);
	tty_unregister_device(gs_tty_driver, port_num);
}
EXPORT_SYMBOL_GPL(gserial_free_line);

int gserial_alloc_line_no_console(unsigned char *line_num)
{
	struct usb_cdc_line_coding coding;
	struct gs_port *port;
	struct device *tty_dev;
	int ret;
	int port_num;

	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = 8;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = USB_CDC_1_STOP_BITS;

	for (port_num = 0; port_num < MAX_U_SERIAL_PORTS; port_num++) {
		ret = gs_port_alloc(port_num, &coding);
		if (ret == -EBUSY)
			continue;
		if (ret)
			return ret;
		break;
	}
	if (ret)
		return ret;

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */

	port = ports[port_num].port;
	tty_dev = tty_port_register_device(&port->port,
			gs_tty_driver, port_num, NULL);
	if (IS_ERR(tty_dev)) {
		pr_err("%s: failed to register tty for port %d, err %ld\n",
		       __func__, port_num, PTR_ERR(tty_dev));

		ret = PTR_ERR(tty_dev);
		mutex_lock(&ports[port_num].lock);
		ports[port_num].port = NULL;
		mutex_unlock(&ports[port_num].lock);
		gserial_free_port(port);
		goto err;
	}
	*line_num = port_num;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(gserial_alloc_line_no_console);

int gserial_alloc_line(unsigned char *line_num)
{
	int ret = gserial_alloc_line_no_console(line_num);

	if (!ret && !*line_num)
		gs_console_init(ports[*line_num].port);

	return ret;
}
EXPORT_SYMBOL_GPL(gserial_alloc_line);

/**
 * gserial_connect - notify TTY I/O glue that USB link is active
 * @gser: the function, set up with endpoints and descriptors
 * @port_num: which port is active
 * Context: any (usually from irq)
 *
 * This is called to activate endpoints and let the TTY layer know that
 * the connection is active ... not unlike "carrier detect". It won't
 * necessarily start I/O queues; unless the TTY is held open by any
 * task, there would be no point. However, the endpoints will be
 * activated so the USB host can perform I/O, subject to basic USB
 * hardware flow control.
 *
 * Caller needs to have set up the endpoints and USB function in @gser
 * before calling this, as well as the appropriate (speed-specific)
 * endpoint descriptors, and also have allocated @port_num by calling
 * gserial_alloc_line().
 *
 * Returns negative errno or zero.
 * On success, ep->driver_data will be overwritten.
 */
int gserial_connect(struct gserial *gser, u8 port_num)
{
	struct gs_port *port;
	unsigned long flags;
	int status;

	if (port_num >= MAX_U_SERIAL_PORTS)
		return -ENXIO;

	port = ports[port_num].port;
	if (!port) {
		pr_err("serial line %d not allocated.\n", port_num);
		return -EINVAL;
	}
	if (port->port_usb) {
		pr_err("serial line %d is in use.\n", port_num);
		return -EBUSY;
	}

	/* activate the endpoints */
	status = usb_ep_enable(gser->in);
	if (status < 0)
		return status;
	gser->in->driver_data = port;

	status = usb_ep_enable(gser->out);
	if (status < 0)
		goto fail_out;
	gser->out->driver_data = port;

	/* then tell the tty glue that I/O can work */
	spin_lock_irqsave(&port->port_lock, flags);
	gser->ioport = port;
	port->port_usb = gser;

	/* REVISIT unclear how best to handle this state...
	 * we don't really couple it with the Linux TTY.
	 */
	gser->port_line_coding = port->port_line_coding;

	/* REVISIT if waiting on "carrier detect", signal. */

	/* if it's already open, start I/O ... and notify the serial
	 * protocol about open/close status (connect/disconnect).
	 */
	if (port->port.count) {
		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
		gs_start_io(port);
		if (gser->connect)
			gser->connect(gser);
	} else {
		if (gser->disconnect)
			gser->disconnect(gser);
	}

	status = gs_console_connect(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;

fail_out:
	usb_ep_disable(gser->in);
	return status;
}
EXPORT_SYMBOL_GPL(gserial_connect);
/**
 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
 * @gser: the function, on which gserial_connect() was called
 * Context: any (usually from irq)
 *
 * This is called to deactivate endpoints and let the TTY layer know
 * that the connection went inactive ... not unlike "hangup".
 *
 * On return, the state is as if gserial_connect() had never been called;
 * there is no active USB I/O on these endpoints.
 */
void gserial_disconnect(struct gserial *gser)
{
	struct gs_port *port = gser->ioport;
	unsigned long flags;

	if (!port)
		return;

	/* tell the TTY glue not to do I/O here any more */
	spin_lock_irqsave(&port->port_lock, flags);

	gs_console_disconnect(port);

	/* REVISIT as above: how best to track this? */
	port->port_line_coding = gser->port_line_coding;

	port->port_usb = NULL;
	gser->ioport = NULL;
	if (port->port.count > 0) {
		wake_up_interruptible(&port->drain_wait);
		if (port->port.tty)
			tty_hangup(port->port.tty);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	usb_ep_disable(gser->in);

	/* finally, free any unused/unusable I/O buffers */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port.count == 0)
		kfifo_free(&port->port_write_buf);
	gs_free_requests(gser->out, &port->read_pool, NULL);
	gs_free_requests(gser->out, &port->read_queue, NULL);
	gs_free_requests(gser->in, &port->write_pool, NULL);

	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);

static int userial_init(void)
{
	unsigned i;
	int status;

	gs_tty_driver = alloc_tty_driver(MAX_U_SERIAL_PORTS);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = "ttyGS";
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows. Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	tty_set_operations(gs_tty_driver, &gs_tty_ops);
	for (i = 0; i < MAX_U_SERIAL_PORTS; i++)
		mutex_init(&ports[i].lock);

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		pr_err("%s: cannot register, err %d\n",
		       __func__, status);
		goto fail;
	}

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
		 MAX_U_SERIAL_PORTS,
		 (MAX_U_SERIAL_PORTS == 1) ? "" : "s");

	return status;
fail:
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}
module_init(userial_init);

static void userial_cleanup(void)
{
	tty_unregister_driver(gs_tty_driver);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
}
module_exit(userial_cleanup);

MODULE_LICENSE("GPL");