/*
 *
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012  Intel Corporation
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"

#define HCI_3WIRE_ACK_PKT  0
#define HCI_3WIRE_LINK_PKT 15

#define H5_TXWINSIZE       4

#define H5_ACK_TIMEOUT     msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT    msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 * 4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)      ((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)      (((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)      (((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)      ((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4))

#define SLIP_DELIMITER 0xc0
#define SLIP_ESC       0xdb
#define SLIP_ESC_DELIM 0xdc
#define SLIP_ESC_ESC   0xdd

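/* Per-HCI-UART state for the Three-wire (H5) protocol, stored in hu->priv */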
struct h5 {
        struct sk_buff_head unack;    /* Unack'ed packets queue */
        struct sk_buff_head rel;      /* Reliable packets queue */
        struct sk_buff_head unrel;    /* Unreliable packets queue */

        struct sk_buff *rx_skb;       /* Receive buffer */
        size_t rx_pending;            /* Expecting more bytes */
        bool rx_esc;                  /* SLIP escape mode */
        u8 rx_ack;                    /* Last ack number received */

        int (*rx_func) (struct hci_uart *hu, u8 c);

        struct timer_list timer;      /* Retransmission timer */

        bool tx_ack_req;              /* Pending ack to send */
        u8 tx_seq;                    /* Next seq number to send */
        u8 tx_ack;                    /* Next ack number to send */
};

static void h5_reset_rx(struct h5 *h5);

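/*
 * Retransmission timer callback: pull all unacked packets back onto the
 * head of the reliable queue (rewinding tx_seq for each) and trigger a
 * new transmission attempt.
 */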
static void h5_timed_event(unsigned long arg)
{
        struct hci_uart *hu = (struct hci_uart *) arg;
        struct h5 *h5 = hu->priv;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

        spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

        while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
                h5->tx_seq = (h5->tx_seq - 1) & 0x07;
                skb_queue_head(&h5->rel, skb);
        }

        spin_unlock_irqrestore(&h5->unack.lock, flags);

        hci_uart_tx_wakeup(hu);
}

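/* Queue a Three-wire link control message (sync/config) on the unreliable queue */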
static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
        struct h5 *h5 = hu->priv;
        struct sk_buff *nskb;

        nskb = alloc_skb(3, GFP_ATOMIC);
        if (!nskb)
                return;

        bt_cb(nskb)->pkt_type = HCI_3WIRE_LINK_PKT;

        memcpy(skb_put(nskb, len), data, len);

        skb_queue_tail(&h5->unrel, nskb);
}

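/* Allocate per-device state and kick off the Three-wire link establishment */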
static int h5_open(struct hci_uart *hu)
{
        struct h5 *h5;
        const unsigned char sync[] = { 0x01, 0x7e };

        BT_DBG("hu %p", hu);

        h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
        if (!h5)
                return -ENOMEM;

        hu->priv = h5;

        skb_queue_head_init(&h5->unack);
        skb_queue_head_init(&h5->rel);
        skb_queue_head_init(&h5->unrel);

        h5_reset_rx(h5);

        init_timer(&h5->timer);
        h5->timer.function = h5_timed_event;
        h5->timer.data = (unsigned long) hu;

        set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

        /* Send initial sync request */
        h5_link_control(hu, sync, sizeof(sync));
        mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

        return 0;
}

static int h5_close(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;

        skb_queue_purge(&h5->unack);
        skb_queue_purge(&h5->rel);
        skb_queue_purge(&h5->unrel);

        del_timer(&h5->timer);

        kfree(h5);

        return 0;
}

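/*
 * Remove packets from the unack queue that the controller has now
 * acknowledged (rx_ack), walking backwards from tx_seq to work out how
 * many entries to drop.
 */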
static void h5_pkt_cull(struct h5 *h5)
{
        struct sk_buff *skb, *tmp;
        unsigned long flags;
        int i, to_remove;
        u8 seq;

        spin_lock_irqsave(&h5->unack.lock, flags);

        to_remove = skb_queue_len(&h5->unack);
        if (to_remove == 0)
                goto unlock;

        seq = h5->tx_seq;

        while (to_remove > 0) {
                if (h5->rx_ack == seq)
                        break;

                to_remove--;
                seq = (seq - 1) & 0x07;    /* modular decrement within the 3-bit sequence space */
        }

        if (seq != h5->rx_ack)
                BT_ERR("Controller acked invalid packet");

        i = 0;
        skb_queue_walk_safe(&h5->unack, skb, tmp) {
                if (i++ >= to_remove)
                        break;

                __skb_unlink(skb, &h5->unack);
                kfree_skb(skb);
        }

        if (skb_queue_empty(&h5->unack))
                del_timer(&h5->timer);

unlock:
        spin_unlock_irqrestore(&h5->unack.lock, flags);
}

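/* Handle link control packets that make up the sync/config handshake */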
static void h5_handle_internal_rx(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;
        const unsigned char sync_req[] = { 0x01, 0x7e };
        const unsigned char sync_rsp[] = { 0x02, 0x7d };
        const unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
        const unsigned char conf_rsp[] = { 0x04, 0x7b, 0x01 };
        const unsigned char *hdr = h5->rx_skb->data;
        const unsigned char *data = &h5->rx_skb->data[4];

        BT_DBG("%s", hu->hdev->name);

        if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
                return;

        if (H5_HDR_LEN(hdr) < 2)
                return;

        if (memcmp(data, sync_req, 2) == 0) {
                h5_link_control(hu, sync_rsp, 2);
        } else if (memcmp(data, sync_rsp, 2) == 0) {
                h5_link_control(hu, conf_req, 3);
        } else if (memcmp(data, conf_req, 2) == 0) {
                h5_link_control(hu, conf_rsp, 2);
                h5_link_control(hu, conf_req, 3);
        } else if (memcmp(data, conf_rsp, 2) == 0) {
                BT_DBG("Three-wire init sequence complete");
                hci_uart_init_ready(hu);
                return;
        } else {
                BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
                return;
        }

        hci_uart_tx_wakeup(hu);
}

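/*
 * A full packet has been received: update ack state, cull newly
 * acknowledged packets and pass HCI traffic up to the core (link
 * control packets are handled internally instead).
 */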
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;
        const unsigned char *hdr = h5->rx_skb->data;

        BT_DBG("%s", hu->hdev->name);

        if (H5_HDR_RELIABLE(hdr)) {
                h5->tx_ack = (h5->tx_ack + 1) % 8;
                h5->tx_ack_req = true;
                hci_uart_tx_wakeup(hu);
        }

        h5->rx_ack = H5_HDR_ACK(hdr);

        h5_pkt_cull(h5);

        switch (H5_HDR_PKT_TYPE(hdr)) {
        case HCI_EVENT_PKT:
        case HCI_ACLDATA_PKT:
        case HCI_SCODATA_PKT:
                bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);

                /* Remove Three-wire header */
                skb_pull(h5->rx_skb, 4);

                hci_recv_frame(h5->rx_skb);
                h5->rx_skb = NULL;

                break;

        default:
                h5_handle_internal_rx(hu);
                break;
        }

        h5_reset_rx(h5);
}

static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
        struct h5 *h5 = hu->priv;

        BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

        h5_complete_rx_pkt(hu);
        h5_reset_rx(h5);

        return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
        struct h5 *h5 = hu->priv;
        const unsigned char *hdr = h5->rx_skb->data;

        BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

        if (H5_HDR_CRC(hdr)) {
                h5->rx_func = h5_rx_crc;
                h5->rx_pending = 2;
        } else {
                h5_complete_rx_pkt(hu);
                h5_reset_rx(h5);
        }

        return 0;
}

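/* Validate the four-byte Three-wire header and set up payload reception */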
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
        struct h5 *h5 = hu->priv;
        const unsigned char *hdr = h5->rx_skb->data;

        BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

        BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
               hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
               H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
               H5_HDR_LEN(hdr));

        if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
                BT_ERR("Invalid header checksum");
                h5_reset_rx(h5);
                return 0;
        }

        if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
                BT_ERR("Out-of-order packet arrived (%u != %u)",
                       H5_HDR_SEQ(hdr), h5->tx_ack);
                h5_reset_rx(h5);
                return 0;
        }

        h5->rx_func = h5_rx_payload;
        h5->rx_pending = H5_HDR_LEN(hdr);

        return 0;
}

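/* Start of a new packet: allocate the receive skb and expect a 4-byte header */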
static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
        struct h5 *h5 = hu->priv;

        BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

        if (c == SLIP_DELIMITER)
                return 1;

        h5->rx_func = h5_rx_3wire_hdr;
        h5->rx_pending = 4;

        h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
        if (!h5->rx_skb) {
                BT_ERR("Can't allocate mem for new packet");
                h5_reset_rx(h5);
                return -ENOMEM;
        }

        h5->rx_skb->dev = (void *) hu->hdev;

        return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
        struct h5 *h5 = hu->priv;

        BT_DBG("%s 0x%02hhx", hu->hdev->name, c);

        if (c == SLIP_DELIMITER)
                h5->rx_func = h5_rx_pkt_start;

        return 1;
}

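/* Undo SLIP escaping for one incoming byte and append it to the rx skb */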
static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
        const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
        const u8 *byte = &c;

        if (!h5->rx_esc && c == SLIP_ESC) {
                h5->rx_esc = true;
                return;
        }

        if (h5->rx_esc) {
                switch (c) {
                case SLIP_ESC_DELIM:
                        byte = &delim;
                        break;
                case SLIP_ESC_ESC:
                        byte = &esc;
                        break;
                default:
                        BT_ERR("Invalid esc byte 0x%02hhx", c);
                        h5_reset_rx(h5);
                        return;
                }

                h5->rx_esc = false;
        }

        memcpy(skb_put(h5->rx_skb, 1), byte, 1);
        h5->rx_pending--;

        BT_DBG("unslipped 0x%02hhx", *byte);
}

static void h5_reset_rx(struct h5 *h5)
{
        if (h5->rx_skb) {
                kfree_skb(h5->rx_skb);
                h5->rx_skb = NULL;
        }

        h5->rx_func = h5_rx_delimiter;
        h5->rx_pending = 0;
        h5->rx_esc = false;
}

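/* Entry point for data from the UART: feed bytes through the rx state machine */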
static int h5_recv(struct hci_uart *hu, void *data, int count)
{
        struct h5 *h5 = hu->priv;
        unsigned char *ptr = data;

        BT_DBG("%s count %d", hu->hdev->name, count);

        while (count > 0) {
                int processed;

                if (h5->rx_pending > 0) {
                        if (*ptr == SLIP_DELIMITER) {
                                BT_ERR("Too short H5 packet");
                                h5_reset_rx(h5);
                                continue;
                        }

                        h5_unslip_one_byte(h5, *ptr);

                        ptr++; count--;
                        continue;
                }

                processed = h5->rx_func(hu, *ptr);
                if (processed < 0)
                        return processed;

                ptr += processed;
                count -= processed;
        }

        return 0;
}

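/* Queue an outgoing HCI packet on the reliable or unreliable queue as appropriate */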
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
        struct h5 *h5 = hu->priv;

        if (skb->len > 0xfff) {
                BT_ERR("Packet too long (%u bytes)", skb->len);
                kfree_skb(skb);
                return 0;
        }

        switch (bt_cb(skb)->pkt_type) {
        case HCI_ACLDATA_PKT:
        case HCI_COMMAND_PKT:
                skb_queue_tail(&h5->rel, skb);
                break;

        case HCI_SCODATA_PKT:
                skb_queue_tail(&h5->unrel, skb);
                break;

        default:
                BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
                kfree_skb(skb);
                break;
        }

        return 0;
}

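/* SLIP encoding helpers for outgoing packets */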
static void h5_slip_delim(struct sk_buff *skb)
{
        const char delim = SLIP_DELIMITER;

        memcpy(skb_put(skb, 1), &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
        const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
        const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

        switch (c) {
        case SLIP_DELIMITER:
                memcpy(skb_put(skb, 2), &esc_delim, 2);
                break;
        case SLIP_ESC:
                memcpy(skb_put(skb, 2), &esc_esc, 2);
                break;
        default:
                memcpy(skb_put(skb, 1), &c, 1);
        }
}

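/*
 * Build a complete SLIP-framed Three-wire packet: delimiter, 4-byte header
 * (with seq/ack, type and length), escaped payload and closing delimiter.
 */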
static struct sk_buff *h5_build_pkt(struct hci_uart *hu, bool rel, u8 pkt_type,
                                    const u8 *data, size_t len)
{
        struct h5 *h5 = hu->priv;
        struct sk_buff *nskb;
        u8 hdr[4];
        int i;

        /*
         * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
         * (because bytes 0xc0 and 0xdb are escaped, worst case is when
         * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
         * delimiters at start and end).
         */
        nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
        if (!nskb)
                return NULL;

        bt_cb(nskb)->pkt_type = pkt_type;

        h5_slip_delim(nskb);

        hdr[0] = h5->tx_ack << 3;
        h5->tx_ack_req = false;

        if (rel) {
                hdr[0] |= 1 << 7;
                hdr[0] |= h5->tx_seq;
                h5->tx_seq = (h5->tx_seq + 1) % 8;
        }

        hdr[1] = pkt_type | ((len & 0x0f) << 4);
        hdr[2] = len >> 4;
        hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

        BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
               hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
               H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
               H5_HDR_LEN(hdr));

        for (i = 0; i < 4; i++)
                h5_slip_one_byte(nskb, hdr[i]);

        for (i = 0; i < len; i++)
                h5_slip_one_byte(nskb, data[i]);

        h5_slip_delim(nskb);

        return nskb;
}

static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
                                      const u8 *data, size_t len)
{
        bool rel;

        switch (pkt_type) {
        case HCI_ACLDATA_PKT:
        case HCI_COMMAND_PKT:
                rel = true;
                break;
        case HCI_SCODATA_PKT:
        case HCI_3WIRE_LINK_PKT:
        case HCI_3WIRE_ACK_PKT:
                rel = false;
                break;
        default:
                BT_ERR("Unknown packet type %u", pkt_type);
                return NULL;
        }

        return h5_build_pkt(hu, rel, pkt_type, data, len);
}

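/*
 * Pick the next packet to send: unreliable traffic first, then reliable
 * traffic (limited by the tx window), and finally a pure ack if one is
 * still owed to the controller.
 */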
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;
        unsigned long flags;
        struct sk_buff *skb, *nskb;

        if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
                nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
                                      skb->data, skb->len);
                if (nskb) {
                        kfree_skb(skb);
                        return nskb;
                }

                skb_queue_head(&h5->unrel, skb);
                BT_ERR("Could not dequeue pkt because alloc_skb failed");
        }

        spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

        if (h5->unack.qlen >= H5_TXWINSIZE)
                goto unlock;

        if ((skb = skb_dequeue(&h5->rel)) != NULL) {
                nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
                                      skb->data, skb->len);
                if (nskb) {
                        __skb_queue_tail(&h5->unack, skb);
                        mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
                        spin_unlock_irqrestore(&h5->unack.lock, flags);
                        return nskb;
                }

                skb_queue_head(&h5->rel, skb);
                BT_ERR("Could not dequeue pkt because alloc_skb failed");
        }

unlock:
        spin_unlock_irqrestore(&h5->unack.lock, flags);

        if (h5->tx_ack_req)
                return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

        return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
        BT_DBG("hu %p", hu);
        return 0;
}

static struct hci_uart_proto h5p = {
        .id      = HCI_UART_3WIRE,
        .open    = h5_open,
        .close   = h5_close,
        .recv    = h5_recv,
        .enqueue = h5_enqueue,
        .dequeue = h5_dequeue,
        .flush   = h5_flush,
};

int __init h5_init(void)
{
        int err = hci_uart_register_proto(&h5p);

        if (!err)
                BT_INFO("HCI Three-wire UART (H5) protocol initialized");
        else
                BT_ERR("HCI Three-wire UART (H5) protocol init failed");

        return err;
}

int __exit h5_deinit(void)
{
        return hci_uart_unregister_proto(&h5p);
}