// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 MediaTek Inc.

/*
 * Bluetooth support for MediaTek serial devices
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"

#define VERSION "0.1"

#define FIRMWARE_MT7622		"mediatek/mt7622pr2h.bin"

#define MTK_STP_TLR_SIZE	2

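/* Bits in bdev->tx_state: ACTIVE means the TX work is currently running,
 * WAKEUP asks a running TX work for one more pass over the queue, and
 * WAIT_VND_EVT is set while a WMT command is waiting for its vendor event.
 */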
#define BTMTKUART_TX_STATE_ACTIVE	1
#define BTMTKUART_TX_STATE_WAKEUP	2
#define BTMTKUART_TX_WAIT_VND_EVT	3

enum {
	MTK_WMT_PATCH_DWNLD = 0x1,
	MTK_WMT_FUNC_CTRL = 0x6,
	MTK_WMT_RST = 0x7
};

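/* STP header as used on the wire by this driver: a fixed 0x80 prefix, a
 * big-endian length word whose low 12 bits carry the payload length (the
 * top nibble is a type field, always zero here) and a checksum byte that
 * the MT7622 does not verify.
 */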
struct mtk_stp_hdr {
	u8	prefix;
	__be16	dlen;
	u8	cs;
} __packed;

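/* WMT header placed in front of the payload of vendor command 0xfc6f: dir
 * is 1 for host-to-controller, op selects the WMT operation, dlen is the
 * little-endian length of flag plus data, and flag carries per-operation
 * parameters (see mtk_hci_wmt_sync()).
 */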
struct mtk_wmt_hdr {
	u8	dir;
	u8	op;
	__le16	dlen;
	u8	flag;
} __packed;

struct mtk_hci_wmt_cmd {
	struct mtk_wmt_hdr hdr;
	u8 data[256];
} __packed;

struct btmtk_hci_wmt_params {
	u8 op;
	u8 flag;
	u16 dlen;
	const void *data;
	u32 *status;
};

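/* Per-device state. stp_pad, stp_cursor and stp_dlen track reassembly of
 * the STP framing across serdev receive callbacks: stp_cursor counts how
 * many framing bytes have been collected into stp_pad so far and stp_dlen
 * is the payload still expected from the current STP packet.
 */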
struct btmtkuart_dev {
	struct hci_dev *hdev;
	struct serdev_device *serdev;
	struct clk *clk;

	struct work_struct tx_work;
	unsigned long tx_state;
	struct sk_buff_head txq;

	struct sk_buff *rx_skb;

	u8	stp_pad[6];
	u8	stp_cursor;
	u16	stp_dlen;
};

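/* Build a WMT packet from wmt_params, send it as HCI vendor command 0xfc6f
 * and block until the corresponding vendor event has been processed by
 * btmtkuart_recv_event() or HCI_INIT_TIMEOUT expires.
 */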
static int mtk_hci_wmt_sync(struct hci_dev *hdev,
			    struct btmtk_hci_wmt_params *wmt_params)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct mtk_hci_wmt_cmd wc;
	struct mtk_wmt_hdr *hdr;
	u32 hlen;
	int err;

	hlen = sizeof(*hdr) + wmt_params->dlen;
	if (hlen > 255)
		return -EINVAL;

	hdr = (struct mtk_wmt_hdr *)&wc;
	hdr->dir = 1;
	hdr->op = wmt_params->op;
	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
	hdr->flag = wmt_params->flag;
	memcpy(wc.data, wmt_params->data, wmt_params->dlen);

	set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
	if (err < 0) {
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and will not have the Command Status or Command
	 * Complete as with usual HCI command flow control.
	 *
	 * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
	 * state to be cleared. The driver specific event receive routine
	 * will clear that state and with that indicate completion of the
	 * WMT command.
	 */
	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
	if (err == -EINTR) {
		bt_dev_err(hdev, "Execution of wmt command interrupted");
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	if (err) {
		bt_dev_err(hdev, "Execution of wmt command timed out");
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		return -ETIMEDOUT;
	}

	return 0;
}

static int mtk_setup_fw(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	const struct firmware *fw;
	const u8 *fw_ptr;
	size_t fw_size;
	int err, dlen;
	u8 flag;

	err = request_firmware(&fw, FIRMWARE_MT7622, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
		return err;
	}

	fw_ptr = fw->data;
	fw_size = fw->size;

	/* The size of the patch header is 30 bytes, which should be skipped */
	if (fw_size < 30) {
		err = -EINVAL;
		goto free_fw;
	}

	fw_size -= 30;
	fw_ptr += 30;
	flag = 1;

	wmt_params.op = MTK_WMT_PATCH_DWNLD;
	wmt_params.status = NULL;

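	/* The flag sent with each PATCH_DWNLD fragment marks its position in
	 * the sequence: 1 for the first fragment, 2 for intermediate ones
	 * and 3 for the last one, as set up in the loop below.
	 */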
	while (fw_size > 0) {
		dlen = min_t(int, 250, fw_size);

		/* Tell device the position in sequence */
		if (fw_size - dlen <= 0)
			flag = 3;
		else if (fw_size < fw->size - 30)
			flag = 2;

		wmt_params.flag = flag;
		wmt_params.dlen = dlen;
		wmt_params.data = fw_ptr;

		err = mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
				   err);
			break;
		}

		fw_size -= dlen;
		fw_ptr += dlen;
	}

free_fw:
	release_firmware(fw);
	return err;
}

static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct hci_event_hdr *hdr = (void *)skb->data;
	int err;

	/* Fix up the vendor event id with 0xff for vendor specific instead
	 * of 0xe4 so that events sent via the monitoring socket can be
	 * parsed properly.
	 */
	if (hdr->evt == 0xe4)
		hdr->evt = HCI_EV_VENDOR;

	err = hci_recv_frame(hdev, skb);

	if (hdr->evt == HCI_EV_VENDOR) {
		if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
				       &bdev->tx_state)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
		}
	}

	return err;
}

static const struct h4_recv_pkt mtk_recv_pkts[] = {
	{ H4_RECV_ACL,      .recv = hci_recv_frame },
	{ H4_RECV_SCO,      .recv = hci_recv_frame },
	{ H4_RECV_EVENT,    .recv = btmtkuart_recv_event },
};

static void btmtkuart_tx_work(struct work_struct *work)
{
	struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
						  tx_work);
	struct serdev_device *serdev = bdev->serdev;
	struct hci_dev *hdev = bdev->hdev;

	while (1) {
		clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

		while (1) {
			struct sk_buff *skb = skb_dequeue(&bdev->txq);
			int len;

			if (!skb)
				break;

			len = serdev_device_write_buf(serdev, skb->data,
						      skb->len);
			hdev->stat.byte_tx += len;

			skb_pull(skb, len);
			if (skb->len > 0) {
				skb_queue_head(&bdev->txq, skb);
				break;
			}

			switch (hci_skb_pkt_type(skb)) {
			case HCI_COMMAND_PKT:
				hdev->stat.cmd_tx++;
				break;
			case HCI_ACLDATA_PKT:
				hdev->stat.acl_tx++;
				break;
			case HCI_SCODATA_PKT:
				hdev->stat.sco_tx++;
				break;
			}

			kfree_skb(skb);
		}

		if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
			break;
	}

	clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
}

static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
{
	if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
		set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

	schedule_work(&bdev->tx_work);
}

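/* Strip STP framing from the incoming byte stream. Framing bytes are
 * buffered until a complete STP header has been seen and validated; the
 * function then returns a pointer to the start of the H:4 payload within
 * @data and sets *sz_h4 to the number of payload bytes available in this
 * chunk. NULL is returned when no payload can be handed to H4 yet.
 */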
static const unsigned char *
mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
	      int *sz_h4)
{
	struct mtk_stp_hdr *shdr;

	/* The cursor is reset once all data of the STP packet is consumed */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
		bdev->stp_cursor = 0;

	/* Fill the pad until all STP header info is obtained */
	while (bdev->stp_cursor < 6 && count > 0) {
		bdev->stp_pad[bdev->stp_cursor] = *data;
		bdev->stp_cursor++;
		data++;
		count--;
	}

	/* Retrieve STP info and perform a sanity check */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;

		/* Resync STP when unexpected data is being read */
		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
			bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
				   shdr->prefix, bdev->stp_dlen);
			bdev->stp_cursor = 2;
			bdev->stp_dlen = 0;
		}
	}

	/* Quit directly when there is no data left for H4 to process */
	if (count <= 0)
		return NULL;

	/* Translate to the amount of data H4 can handle so far */
	*sz_h4 = min_t(int, count, bdev->stp_dlen);

	/* Update the remaining size of the STP packet */
	bdev->stp_dlen -= *sz_h4;

	/* Data points to the STP payload, which can be handled by H4 */
	return data;
}

static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	const unsigned char *p_left = data, *p_h4;
	int sz_left = count, sz_h4, adv;
	int err;

	while (sz_left > 0) {
		/* The serial data received from the MT7622 BT controller is
		 * always wrapped in an STP header and trailer.
		 *
		 * A full STP packet looks like
		 * ------------------------------------
		 * | STP header  |  H:4  | STP trailer |
		 * ------------------------------------
		 * but it is not guaranteed to contain a full H:4 packet; it
		 * may take multiple STP packets to form one H:4 packet, so
		 * an STP header plus its length does not necessarily delimit
		 * a full H:4 frame and things can fragment. The length
		 * recorded in the STP header only indicates how much data
		 * the H:4 engine can handle at this point.
		 */

		p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
		if (!p_h4)
			break;

		adv = p_h4 - p_left;
		sz_left -= adv;
		p_left += adv;

		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
					   sz_h4, mtk_recv_pkts,
					   ARRAY_SIZE(mtk_recv_pkts));
		if (IS_ERR(bdev->rx_skb)) {
			err = PTR_ERR(bdev->rx_skb);
			bt_dev_err(bdev->hdev,
				   "Frame reassembly failed (%d)", err);
			bdev->rx_skb = NULL;
			return err;
		}

		sz_left -= sz_h4;
		p_left += sz_h4;
	}

	return 0;
}

static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
				 size_t count)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	int err;

	err = btmtkuart_recv(bdev->hdev, data, count);
	if (err < 0)
		return err;

	bdev->hdev->stat.byte_rx += count;

	return count;
}

static void btmtkuart_write_wakeup(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);

	btmtkuart_tx_wakeup(bdev);
}

static const struct serdev_device_ops btmtkuart_client_ops = {
	.receive_buf = btmtkuart_receive_buf,
	.write_wakeup = btmtkuart_write_wakeup,
};

static int btmtkuart_open(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev;
	int err;

	err = serdev_device_open(bdev->serdev);
	if (err) {
		bt_dev_err(hdev, "Unable to open UART device %s",
			   dev_name(&bdev->serdev->dev));
		goto err_open;
	}

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	dev = &bdev->serdev->dev;

	/* Enable the power domain and clock the device requires */
	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);
		goto err_disable_rpm;
	}

	err = clk_prepare_enable(bdev->clk);
	if (err < 0)
		goto err_put_rpm;

	return 0;

err_put_rpm:
	pm_runtime_put_sync(dev);
err_disable_rpm:
	pm_runtime_disable(dev);
err_open:
	return err;
}

static int btmtkuart_close(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev = &bdev->serdev->dev;

	/* Shut down the clock and power domain the device requires */
	clk_disable_unprepare(bdev->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	serdev_device_close(bdev->serdev);

	return 0;
}

static int btmtkuart_flush(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);

	/* Flush any pending characters */
	serdev_device_write_flush(bdev->serdev);
	skb_queue_purge(&bdev->txq);

	cancel_work_sync(&bdev->tx_work);

	kfree_skb(bdev->rx_skb);
	bdev->rx_skb = NULL;

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	return 0;
}

static int btmtkuart_setup(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	u8 param = 0x1;
	int err = 0;

	/* Set up the firmware that the device requires */
	err = mtk_setup_fw(hdev);
	if (err < 0)
		return err;

	wmt_params.op = MTK_WMT_RST;
	wmt_params.flag = 4;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = NULL;

	/* Activate the function the firmware provides */
	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
		return err;
	}

	/* Enable Bluetooth protocol */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	return 0;
}

static int btmtkuart_shutdown(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	u8 param = 0x0;
	int err;

	/* Disable the device */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	return 0;
}

static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct mtk_stp_hdr *shdr;
	int err, dlen, type = 0;

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* Make sure there is enough room for the STP header and trailer */
	if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
	    (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
		err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
				       GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	/* Add the STP header */
	dlen = skb->len;
	shdr = skb_push(skb, sizeof(*shdr));
	shdr->prefix = 0x80;
	shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
	shdr->cs = 0;		/* MT7622 doesn't care about checksum value */

	/* Add the STP trailer */
	skb_put_zero(skb, MTK_STP_TLR_SIZE);

	skb_queue_tail(&bdev->txq, skb);

	btmtkuart_tx_wakeup(bdev);
	return 0;
}

static int btmtkuart_probe(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev;
	struct hci_dev *hdev;

	bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->clk = devm_clk_get(&serdev->dev, "ref");
	if (IS_ERR(bdev->clk))
		return PTR_ERR(bdev->clk);

	bdev->serdev = serdev;
	serdev_device_set_drvdata(serdev, bdev);

	serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);

	INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
	skb_queue_head_init(&bdev->txq);

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		dev_err(&serdev->dev, "Can't allocate HCI device\n");
		return -ENOMEM;
	}

	bdev->hdev = hdev;

	hdev->bus = HCI_UART;
	hci_set_drvdata(hdev, bdev);

	hdev->open = btmtkuart_open;
	hdev->close = btmtkuart_close;
	hdev->flush = btmtkuart_flush;
	hdev->setup = btmtkuart_setup;
	hdev->shutdown = btmtkuart_shutdown;
	hdev->send = btmtkuart_send_frame;
	SET_HCIDEV_DEV(hdev, &serdev->dev);

	hdev->manufacturer = 70;
	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

	if (hci_register_dev(hdev) < 0) {
		dev_err(&serdev->dev, "Can't register HCI device\n");
		hci_free_dev(hdev);
		return -ENODEV;
	}

	return 0;
}

static void btmtkuart_remove(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	struct hci_dev *hdev = bdev->hdev;

	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}

#ifdef CONFIG_OF
static const struct of_device_id mtk_of_match_table[] = {
	{ .compatible = "mediatek,mt7622-bluetooth"},
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_of_match_table);
#endif

static struct serdev_device_driver btmtkuart_driver = {
	.probe = btmtkuart_probe,
	.remove = btmtkuart_remove,
	.driver = {
		.name = "btmtkuart",
		.of_match_table = of_match_ptr(mtk_of_match_table),
	},
};

module_serdev_device_driver(btmtkuart_driver);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);