// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 MediaTek Inc.

/*
 * Bluetooth support for MediaTek serial devices
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"

#define VERSION "0.1"

#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"

#define MTK_STP_TLR_SIZE 2

#define BTMTKUART_TX_STATE_ACTIVE 1
#define BTMTKUART_TX_STATE_WAKEUP 2
#define BTMTKUART_TX_WAIT_VND_EVT 3

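/* WMT opcodes used by this driver; they travel as the payload of the
 * vendor-specific HCI command 0xfc6f (see mtk_hci_wmt_sync() below).
 */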
enum {
        MTK_WMT_PATCH_DWNLD = 0x1,
        MTK_WMT_FUNC_CTRL = 0x6,
        MTK_WMT_RST = 0x7,
        MTK_WMT_SEMAPHORE = 0x17,
};

enum {
        BTMTK_WMT_INVALID,
        BTMTK_WMT_PATCH_UNDONE,
        BTMTK_WMT_PATCH_DONE,
        BTMTK_WMT_ON_UNDONE,
        BTMTK_WMT_ON_DONE,
        BTMTK_WMT_ON_PROGRESS,
};

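/* On-wire STP framing as used below in mtk_stp_split() and
 * btmtkuart_send_frame(): prefix is always 0x80, the low 12 bits of dlen
 * carry the payload length (the upper nibble is a type field, left at 0
 * here) and cs is a checksum byte the MT7622 does not check.
 */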
struct mtk_stp_hdr {
        u8 prefix;
        __be16 dlen;
        u8 cs;
} __packed;

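/* Header shared by WMT commands and events. mtk_hci_wmt_sync() sets dir to
 * 1 for host-to-controller commands; dlen counts the flag byte plus the data
 * payload that follow it.
 */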
struct mtk_wmt_hdr {
        u8 dir;
        u8 op;
        __le16 dlen;
        u8 flag;
} __packed;

struct mtk_hci_wmt_cmd {
        struct mtk_wmt_hdr hdr;
        u8 data[256];
} __packed;

struct btmtk_hci_wmt_evt {
        struct hci_event_hdr hhdr;
        struct mtk_wmt_hdr whdr;
} __packed;

struct btmtk_hci_wmt_evt_funcc {
        struct btmtk_hci_wmt_evt hwhdr;
        __be16 status;
} __packed;

struct btmtk_tci_sleep {
        u8 mode;
        __le16 duration;
        __le16 host_duration;
        u8 host_wakeup_pin;
        u8 time_compensation;
} __packed;

struct btmtk_hci_wmt_params {
        u8 op;
        u8 flag;
        u16 dlen;
        const void *data;
        u32 *status;
};

struct btmtkuart_dev {
        struct hci_dev *hdev;
        struct serdev_device *serdev;
        struct clk *clk;

        struct work_struct tx_work;
        unsigned long tx_state;
        struct sk_buff_head txq;

        struct sk_buff *rx_skb;
        struct sk_buff *evt_skb;

        u8 stp_pad[6];
        u8 stp_cursor;
        u16 stp_dlen;
};

static int mtk_hci_wmt_sync(struct hci_dev *hdev,
                            struct btmtk_hci_wmt_params *wmt_params)
{
        struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
        struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
        u32 hlen, status = BTMTK_WMT_INVALID;
        struct btmtk_hci_wmt_evt *wmt_evt;
        struct mtk_hci_wmt_cmd wc;
        struct mtk_wmt_hdr *hdr;
        int err;

        hlen = sizeof(*hdr) + wmt_params->dlen;
        if (hlen > 255)
                return -EINVAL;

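        /* Build the WMT command header in front of the payload. The
         * wire-level dlen is written as the payload length plus one,
         * apparently so that it also covers the flag byte that sits between
         * the length field and the data.
         */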
        hdr = (struct mtk_wmt_hdr *)&wc;
        hdr->dir = 1;
        hdr->op = wmt_params->op;
        hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
        hdr->flag = wmt_params->flag;
        memcpy(wc.data, wmt_params->data, wmt_params->dlen);

        set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);

        err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
        if (err < 0) {
                clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
                return err;
        }

        /* The vendor specific WMT commands are all answered by a vendor
         * specific event and will not have the Command Status or Command
         * Complete as with usual HCI command flow control.
         *
         * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
         * state to be cleared. The driver specific event receive routine
         * will clear that state and with that indicate completion of the
         * WMT command.
         */
        err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
                                  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
        if (err == -EINTR) {
                bt_dev_err(hdev, "Execution of wmt command interrupted");
                clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
                return err;
        }

        if (err) {
                bt_dev_err(hdev, "Execution of wmt command timed out");
                clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
                return -ETIMEDOUT;
        }

        /* Parse and handle the returned WMT event */
        wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
        if (wmt_evt->whdr.op != hdr->op) {
                bt_dev_err(hdev, "Wrong op received %d expected %d",
                           wmt_evt->whdr.op, hdr->op);
                err = -EIO;
                goto err_free_skb;
        }

        switch (wmt_evt->whdr.op) {
        case MTK_WMT_SEMAPHORE:
                if (wmt_evt->whdr.flag == 2)
                        status = BTMTK_WMT_PATCH_UNDONE;
                else
                        status = BTMTK_WMT_PATCH_DONE;
                break;
        case MTK_WMT_FUNC_CTRL:
                wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
                if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
                        status = BTMTK_WMT_ON_DONE;
                else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
                        status = BTMTK_WMT_ON_PROGRESS;
                else
                        status = BTMTK_WMT_ON_UNDONE;
                break;
        }

        if (wmt_params->status)
                *wmt_params->status = status;

err_free_skb:
        kfree_skb(bdev->evt_skb);
        bdev->evt_skb = NULL;

        return err;
}

static int mtk_setup_fw(struct hci_dev *hdev)
{
        struct btmtk_hci_wmt_params wmt_params;
        const struct firmware *fw;
        const u8 *fw_ptr;
        size_t fw_size;
        int err, dlen;
        u8 flag;

        err = request_firmware(&fw, FIRMWARE_MT7622, &hdev->dev);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
                return err;
        }

        fw_ptr = fw->data;
        fw_size = fw->size;

        /* The firmware patch header is 30 bytes and should be skipped */
        if (fw_size < 30) {
                err = -EINVAL;
                goto free_fw;
        }

        fw_size -= 30;
        fw_ptr += 30;
        flag = 1;

        wmt_params.op = MTK_WMT_PATCH_DWNLD;
        wmt_params.status = NULL;

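        /* Push the patch body down in chunks of at most 250 bytes. The flag
         * marks each chunk's position in the sequence: 1 for the first
         * chunk, 2 for intermediate chunks and 3 for the final one.
         */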
        while (fw_size > 0) {
                dlen = min_t(int, 250, fw_size);

                /* Tell the device the position in the sequence */
                if (fw_size - dlen <= 0)
                        flag = 3;
                else if (fw_size < fw->size - 30)
                        flag = 2;

                wmt_params.flag = flag;
                wmt_params.dlen = dlen;
                wmt_params.data = fw_ptr;

                err = mtk_hci_wmt_sync(hdev, &wmt_params);
                if (err < 0) {
                        bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
                                   err);
                        goto free_fw;
                }

                fw_size -= dlen;
                fw_ptr += dlen;
        }

        wmt_params.op = MTK_WMT_RST;
        wmt_params.flag = 4;
        wmt_params.dlen = 0;
        wmt_params.data = NULL;
        wmt_params.status = NULL;

        /* Activate the function that the downloaded firmware provides */
        err = mtk_hci_wmt_sync(hdev, &wmt_params);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
                goto free_fw;
        }

        /* Wait a few moments for the firmware activation to complete */
        usleep_range(10000, 12000);

free_fw:
        release_firmware(fw);
        return err;
}

static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
        struct hci_event_hdr *hdr = (void *)skb->data;
        int err;

        /* Fix up the vendor event id to 0xff (vendor specific) instead of
         * 0xe4 so that events sent via the monitoring socket can be parsed
         * properly.
         */
        if (hdr->evt == 0xe4)
                hdr->evt = HCI_EV_VENDOR;

        /* When someone is waiting for the WMT event, clone the skb; the
         * clone is then parsed and freed in mtk_hci_wmt_sync().
         */
        if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) {
                bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
                if (!bdev->evt_skb) {
                        err = -ENOMEM;
                        goto err_out;
                }
        }

        err = hci_recv_frame(hdev, skb);
        if (err < 0)
                goto err_free_skb;

        if (hdr->evt == HCI_EV_VENDOR) {
                if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
                                       &bdev->tx_state)) {
                        /* Barrier to sync with other CPUs */
                        smp_mb__after_atomic();
                        wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
                }
        }

        return 0;

err_free_skb:
        kfree_skb(bdev->evt_skb);
        bdev->evt_skb = NULL;

err_out:
        return err;
}

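/* ACL and SCO frames are handed straight to the HCI core, while events are
 * routed through btmtkuart_recv_event() so that vendor WMT events can also
 * complete a pending mtk_hci_wmt_sync().
 */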
static const struct h4_recv_pkt mtk_recv_pkts[] = {
        { H4_RECV_ACL, .recv = hci_recv_frame },
        { H4_RECV_SCO, .recv = hci_recv_frame },
        { H4_RECV_EVENT, .recv = btmtkuart_recv_event },
};

static void btmtkuart_tx_work(struct work_struct *work)
{
        struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
                                                  tx_work);
        struct serdev_device *serdev = bdev->serdev;
        struct hci_dev *hdev = bdev->hdev;

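        /* Push queued frames out through the serdev. If its write buffer
         * fills up, the partially written skb is put back on the queue and
         * transmission resumes from the write_wakeup callback. The WAKEUP
         * bit is re-checked after every pass so that a wakeup arriving while
         * data was being written is not lost before ACTIVE is cleared.
         */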
        while (1) {
                clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

                while (1) {
                        struct sk_buff *skb = skb_dequeue(&bdev->txq);
                        int len;

                        if (!skb)
                                break;

                        len = serdev_device_write_buf(serdev, skb->data,
                                                      skb->len);
                        hdev->stat.byte_tx += len;

                        skb_pull(skb, len);
                        if (skb->len > 0) {
                                skb_queue_head(&bdev->txq, skb);
                                break;
                        }

                        switch (hci_skb_pkt_type(skb)) {
                        case HCI_COMMAND_PKT:
                                hdev->stat.cmd_tx++;
                                break;
                        case HCI_ACLDATA_PKT:
                                hdev->stat.acl_tx++;
                                break;
                        case HCI_SCODATA_PKT:
                                hdev->stat.sco_tx++;
                                break;
                        }

                        kfree_skb(skb);
                }

                if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
                        break;
        }

        clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
}

static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
{
        if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
                set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

        schedule_work(&bdev->tx_work);
}

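/* Strip the STP envelope from the incoming byte stream. bdev->stp_pad
 * collects up to six bytes at a packet boundary: what appears to be the
 * two-byte trailer of the previous STP packet followed by the four-byte
 * header of the next one, which is why stp_cursor starts at 2 on open/flush
 * where no trailer precedes the first header.
 */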
static const unsigned char *
mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
              int *sz_h4)
{
        struct mtk_stp_hdr *shdr;

        /* The cursor is reset once all STP data has been consumed */
        if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
                bdev->stp_cursor = 0;

        /* Fill the pad until the full STP header info is obtained */
        while (bdev->stp_cursor < 6 && count > 0) {
                bdev->stp_pad[bdev->stp_cursor] = *data;
                bdev->stp_cursor++;
                data++;
                count--;
        }

        /* Retrieve the STP info and run a sanity check on it */
        if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
                shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
                bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;

                /* Resync STP when unexpected data is being read */
                if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
                        bt_dev_err(bdev->hdev, "stp format unexpected (%d, %d)",
                                   shdr->prefix, bdev->stp_dlen);
                        bdev->stp_cursor = 2;
                        bdev->stp_dlen = 0;
                }
        }

        /* Quit directly when there is no data left for H4 to process */
        if (count <= 0)
                return NULL;

        /* Translate to the amount of data H4 can handle so far */
        *sz_h4 = min_t(int, count, bdev->stp_dlen);

        /* Update the remaining size of the STP packet */
        bdev->stp_dlen -= *sz_h4;

        /* Data points to the STP payload which can be handled by H4 */
        return data;
}

static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
{
        struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
        const unsigned char *p_left = data, *p_h4;
        int sz_left = count, sz_h4, adv;
        int err;

        while (sz_left > 0) {
                /* The serial data received from the MT7622 BT controller is
                 * always wrapped in an STP header and trailer.
                 *
                 * A full STP packet looks like
                 *   -------------------------------------
                 *   | STP header |  H:4  | STP trailer |
                 *   -------------------------------------
                 * but it is not guaranteed to carry a complete H:4 packet:
                 * an H:4 packet may be fragmented across several STP
                 * packets. The length recorded in the STP header therefore
                 * only tells us how much data the H:4 engine can consume at
                 * this point.
                 */

                p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
                if (!p_h4)
                        break;

                adv = p_h4 - p_left;
                sz_left -= adv;
                p_left += adv;

                bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
                                           sz_h4, mtk_recv_pkts,
                                           ARRAY_SIZE(mtk_recv_pkts));
                if (IS_ERR(bdev->rx_skb)) {
                        err = PTR_ERR(bdev->rx_skb);
                        bt_dev_err(bdev->hdev,
                                   "Frame reassembly failed (%d)", err);
                        bdev->rx_skb = NULL;
                        return err;
                }

                sz_left -= sz_h4;
                p_left += sz_h4;
        }

        return 0;
}

static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
                                 size_t count)
{
        struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
        int err;

        err = btmtkuart_recv(bdev->hdev, data, count);
        if (err < 0)
                return err;

        bdev->hdev->stat.byte_rx += count;

        return count;
}

static void btmtkuart_write_wakeup(struct serdev_device *serdev)
{
        struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);

        btmtkuart_tx_wakeup(bdev);
}

static const struct serdev_device_ops btmtkuart_client_ops = {
        .receive_buf = btmtkuart_receive_buf,
        .write_wakeup = btmtkuart_write_wakeup,
};

static int btmtkuart_open(struct hci_dev *hdev)
{
        struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
        struct device *dev;
        int err;

        err = serdev_device_open(bdev->serdev);
        if (err) {
                bt_dev_err(hdev, "Unable to open UART device %s",
                           dev_name(&bdev->serdev->dev));
                goto err_open;
        }

        bdev->stp_cursor = 2;
        bdev->stp_dlen = 0;

        dev = &bdev->serdev->dev;

        /* Enable the power domain and clock the device requires */
        pm_runtime_enable(dev);
        err = pm_runtime_get_sync(dev);
        if (err < 0) {
                pm_runtime_put_noidle(dev);
                goto err_disable_rpm;
        }

        err = clk_prepare_enable(bdev->clk);
        if (err < 0)
                goto err_put_rpm;

        return 0;

err_put_rpm:
        pm_runtime_put_sync(dev);
err_disable_rpm:
        pm_runtime_disable(dev);
err_open:
        return err;
}

static int btmtkuart_close(struct hci_dev *hdev)
{
        struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
        struct device *dev = &bdev->serdev->dev;

        /* Shut down the clock and power domain the device requires */
        clk_disable_unprepare(bdev->clk);
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);

        serdev_device_close(bdev->serdev);

        return 0;
}

static int btmtkuart_flush(struct hci_dev *hdev)
{
        struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);

        /* Flush any pending characters */
        serdev_device_write_flush(bdev->serdev);
        skb_queue_purge(&bdev->txq);

        cancel_work_sync(&bdev->tx_work);

        kfree_skb(bdev->rx_skb);
        bdev->rx_skb = NULL;

        bdev->stp_cursor = 2;
        bdev->stp_dlen = 0;

        return 0;
}

static int btmtkuart_func_query(struct hci_dev *hdev)
{
        struct btmtk_hci_wmt_params wmt_params;
        int status, err;
        u8 param = 0;

        /* Query whether the function is enabled */
        wmt_params.op = MTK_WMT_FUNC_CTRL;
        wmt_params.flag = 4;
        wmt_params.dlen = sizeof(param);
        wmt_params.data = &param;
        wmt_params.status = &status;

        err = mtk_hci_wmt_sync(hdev, &wmt_params);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to query function status (%d)", err);
                return err;
        }

        return status;
}

static int btmtkuart_setup(struct hci_dev *hdev)
{
        struct btmtk_hci_wmt_params wmt_params;
        ktime_t calltime, delta, rettime;
        struct btmtk_tci_sleep tci_sleep;
        unsigned long long duration;
        struct sk_buff *skb;
        int err, status;
        u8 param = 0x1;

        calltime = ktime_get();

        /* Query whether the firmware has already been downloaded */
        wmt_params.op = MTK_WMT_SEMAPHORE;
        wmt_params.flag = 1;
        wmt_params.dlen = 0;
        wmt_params.data = NULL;
        wmt_params.status = &status;

        err = mtk_hci_wmt_sync(hdev, &wmt_params);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
                return err;
        }

        if (status == BTMTK_WMT_PATCH_DONE) {
                bt_dev_info(hdev, "Firmware already downloaded");
                goto ignore_setup_fw;
        }

        /* Set up the firmware which the device definitely requires */
        err = mtk_setup_fw(hdev);
        if (err < 0)
                return err;

ignore_setup_fw:
        /* Query whether the device is already enabled */
        err = readx_poll_timeout(btmtkuart_func_query, hdev, status,
                                 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
                                 2000, 5000000);
        /* -ETIMEDOUT happens */
        if (err < 0)
                return err;

        /* The other errors happen in btmtkuart_func_query() */
        if (status < 0)
                return status;

        if (status == BTMTK_WMT_ON_DONE) {
                bt_dev_info(hdev, "function already on");
                goto ignore_func_on;
        }

        /* Enable the Bluetooth protocol */
        wmt_params.op = MTK_WMT_FUNC_CTRL;
        wmt_params.flag = 0;
        wmt_params.dlen = sizeof(param);
        wmt_params.data = &param;
        wmt_params.status = NULL;

        err = mtk_hci_wmt_sync(hdev, &wmt_params);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
                return err;
        }

ignore_func_on:
        /* Apply the low power environment setup */
        tci_sleep.mode = 0x5;
        tci_sleep.duration = cpu_to_le16(0x640);
        tci_sleep.host_duration = cpu_to_le16(0x640);
        tci_sleep.host_wakeup_pin = 0;
        tci_sleep.time_compensation = 0;

        skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
                             HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
                return err;
        }
        kfree_skb(skb);

        rettime = ktime_get();
        delta = ktime_sub(rettime, calltime);
        duration = (unsigned long long)ktime_to_ns(delta) >> 10;

        bt_dev_info(hdev, "Device setup in %llu usecs", duration);

        return 0;
}

static int btmtkuart_shutdown(struct hci_dev *hdev)
{
        struct btmtk_hci_wmt_params wmt_params;
        u8 param = 0x0;
        int err;

        /* Disable the device */
        wmt_params.op = MTK_WMT_FUNC_CTRL;
        wmt_params.flag = 0;
        wmt_params.dlen = sizeof(param);
        wmt_params.data = &param;
        wmt_params.status = NULL;

        err = mtk_hci_wmt_sync(hdev, &wmt_params);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
                return err;
        }

        return 0;
}

static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
        struct mtk_stp_hdr *shdr;
        int err, dlen, type = 0;

        /* Prepend the skb with the frame type */
        memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

        /* Make sure there is enough room for the STP header and trailer */
        if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
            (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
                err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
                                       GFP_ATOMIC);
                if (err < 0)
                        return err;
        }

        /* Add the STP header */
        dlen = skb->len;
        shdr = skb_push(skb, sizeof(*shdr));
        shdr->prefix = 0x80;
        shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
        shdr->cs = 0; /* MT7622 doesn't care about the checksum value */

        /* Add the STP trailer */
        skb_put_zero(skb, MTK_STP_TLR_SIZE);

        skb_queue_tail(&bdev->txq, skb);

        btmtkuart_tx_wakeup(bdev);
        return 0;
}

static int btmtkuart_probe(struct serdev_device *serdev)
{
        struct btmtkuart_dev *bdev;
        struct hci_dev *hdev;

        bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
        if (!bdev)
                return -ENOMEM;

        bdev->clk = devm_clk_get(&serdev->dev, "ref");
        if (IS_ERR(bdev->clk))
                return PTR_ERR(bdev->clk);

        bdev->serdev = serdev;
        serdev_device_set_drvdata(serdev, bdev);

        serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);

        INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
        skb_queue_head_init(&bdev->txq);

        /* Initialize and register HCI device */
        hdev = hci_alloc_dev();
        if (!hdev) {
                dev_err(&serdev->dev, "Can't allocate HCI device\n");
                return -ENOMEM;
        }

        bdev->hdev = hdev;

        hdev->bus = HCI_UART;
        hci_set_drvdata(hdev, bdev);

        hdev->open = btmtkuart_open;
        hdev->close = btmtkuart_close;
        hdev->flush = btmtkuart_flush;
        hdev->setup = btmtkuart_setup;
        hdev->shutdown = btmtkuart_shutdown;
        hdev->send = btmtkuart_send_frame;
        SET_HCIDEV_DEV(hdev, &serdev->dev);

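        /* 70 is the Bluetooth SIG-assigned company identifier of
         * MediaTek, Inc.
         */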
        hdev->manufacturer = 70;
        set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

        if (hci_register_dev(hdev) < 0) {
                dev_err(&serdev->dev, "Can't register HCI device\n");
                hci_free_dev(hdev);
                return -ENODEV;
        }

        return 0;
}

static void btmtkuart_remove(struct serdev_device *serdev)
{
        struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
        struct hci_dev *hdev = bdev->hdev;

        hci_unregister_dev(hdev);
        hci_free_dev(hdev);
}

#ifdef CONFIG_OF
static const struct of_device_id mtk_of_match_table[] = {
        { .compatible = "mediatek,mt7622-bluetooth"},
        { }
};
MODULE_DEVICE_TABLE(of, mtk_of_match_table);
#endif

static struct serdev_device_driver btmtkuart_driver = {
        .probe = btmtkuart_probe,
        .remove = btmtkuart_remove,
        .driver = {
                .name = "btmtkuart",
                .of_match_table = of_match_ptr(mtk_of_match_table),
        },
};

module_serdev_device_driver(btmtkuart_driver);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);