// SPDX-License-Identifier: GPL-2.0-only
/*
 * Bluetooth Software UART Qualcomm protocol
 *
 * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
 * protocol extension to H4.
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
 *
 * Acknowledgements:
 * This file is based on hci_ll.c, which was...
 * Written by Ohad Ben-Cohen <ohad@bencohen.org>
 * which was in turn based on hci_h4.c, which was written
 * by Maxim Krasnyansky and Marcel Holtmann.
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"
#include "btqca.h"

/* HCI_IBS protocol messages */
#define HCI_IBS_SLEEP_IND	0xFE
#define HCI_IBS_WAKE_IND	0xFD
#define HCI_IBS_WAKE_ACK	0xFC
#define HCI_MAX_IBS_SIZE	10

#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS	40
#define IBS_HOST_TX_IDLE_TIMEOUT_MS	2000
#define CMD_TRANS_TIMEOUT_MS		100
#define MEMDUMP_TIMEOUT_MS		8000

/* susclk rate */
#define SUSCLK_RATE_32KHZ	32768

/* Controller debug log header */
#define QCA_DEBUG_HANDLE	0x2EDC

/* Controller dump header */
#define QCA_SSR_DUMP_HANDLE		0x0108
#define QCA_DUMP_PACKET_SIZE		255
#define QCA_LAST_SEQUENCE_NUM		0xFFFF
#define QCA_CRASHBYTE_PACKET_LEN	1096
#define QCA_MEMDUMP_BYTE		0xFB

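/* Bit positions used with test_bit()/set_bit() on qca_data->flags */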
enum qca_flags {
	QCA_IBS_ENABLED,
	QCA_DROP_VENDOR_EVENT,
	QCA_SUSPENDING,
	QCA_MEMDUMP_COLLECTION
};

/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
	HCI_IBS_TX_ASLEEP,
	HCI_IBS_TX_WAKING,
	HCI_IBS_TX_AWAKE,
};

/* HCI_IBS receive side sleep protocol states */
enum rx_states {
	HCI_IBS_RX_ASLEEP,
	HCI_IBS_RX_AWAKE,
};

/* HCI_IBS transmit and receive side clock state vote */
enum hci_ibs_clock_state_vote {
	HCI_IBS_VOTE_STATS_UPDATE,
	HCI_IBS_TX_VOTE_CLOCK_ON,
	HCI_IBS_TX_VOTE_CLOCK_OFF,
	HCI_IBS_RX_VOTE_CLOCK_ON,
	HCI_IBS_RX_VOTE_CLOCK_OFF,
};

/* Controller memory dump states */
enum qca_memdump_states {
	QCA_MEMDUMP_IDLE,
	QCA_MEMDUMP_COLLECTING,
	QCA_MEMDUMP_COLLECTED,
	QCA_MEMDUMP_TIMEOUT,
};

struct qca_memdump_data {
	char *memdump_buf_head;
	char *memdump_buf_tail;
	u32 current_seq_no;
	u32 received_dump;
};

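/* Layout of the vendor event (evt 0xff, opcode QCA_SSR_DUMP_HANDLE) that
 * carries controller memory-dump fragments; seq_no orders the fragments
 * and QCA_LAST_SEQUENCE_NUM marks the final one.
 */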
struct qca_memdump_event_hdr {
	__u8	evt;
	__u8	plen;
	__u16	opcode;
	__u16	seq_no;
	__u8	reserved;
} __packed;

struct qca_dump_size {
	u32 dump_size;
} __packed;

struct qca_data {
	struct hci_uart *hu;
	struct sk_buff *rx_skb;
	struct sk_buff_head txq;
	struct sk_buff_head tx_wait_q;		/* HCI_IBS wait queue */
	struct sk_buff_head rx_memdump_q;	/* Memdump wait queue */
	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock */
	u8 tx_ibs_state;	/* HCI_IBS transmit side power state */
	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
	bool tx_vote;		/* Clock must be on for TX */
	bool rx_vote;		/* Clock must be on for RX */
	struct timer_list tx_idle_timer;
	u32 tx_idle_delay;
	struct timer_list wake_retrans_timer;
	u32 wake_retrans;
	struct timer_list memdump_timer;
	struct workqueue_struct *workqueue;
	struct work_struct ws_awake_rx;
	struct work_struct ws_awake_device;
	struct work_struct ws_rx_vote_off;
	struct work_struct ws_tx_vote_off;
	struct work_struct ctrl_memdump_evt;
	struct qca_memdump_data *qca_memdump;
	unsigned long flags;
	struct completion drop_ev_comp;
	wait_queue_head_t suspend_wait_q;
	enum qca_memdump_states memdump_state;

	/* For debugging purpose */
	u64 ibs_sent_wacks;
	u64 ibs_sent_slps;
	u64 ibs_sent_wakes;
	u64 ibs_recv_wacks;
	u64 ibs_recv_slps;
	u64 ibs_recv_wakes;
	u64 vote_last_jif;
	u32 vote_on_ms;
	u32 vote_off_ms;
	u64 tx_votes_on;
	u64 rx_votes_on;
	u64 tx_votes_off;
	u64 rx_votes_off;
	u64 votes_on;
	u64 votes_off;
};

enum qca_speed_type {
	QCA_INIT_SPEED = 1,
	QCA_OPER_SPEED
};

/*
 * Voltage regulator information required for configuring the
 * QCA Bluetooth chipset
 */
struct qca_vreg {
	const char *name;
	unsigned int load_uA;
};

struct qca_vreg_data {
	enum qca_btsoc_type soc_type;
	struct qca_vreg *vregs;
	size_t num_vregs;
};

/*
 * Platform data for the QCA Bluetooth power driver.
 */
struct qca_power {
	struct device *dev;
	struct regulator_bulk_data *vreg_bulk;
	int num_vregs;
	bool vregs_on;
};

struct qca_serdev {
	struct hci_uart serdev_hu;
	struct gpio_desc *bt_en;
	struct clk *susclk;
	enum qca_btsoc_type btsoc_type;
	struct qca_power *bt_power;
	u32 init_speed;
	u32 oper_speed;
	const char *firmware_name;
};

static int qca_regulator_enable(struct qca_serdev *qcadev);
static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
static void qca_controller_memdump(struct work_struct *work);

static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
{
	enum qca_btsoc_type soc_type;

	if (hu->serdev) {
		struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);

		soc_type = qsd->btsoc_type;
	} else {
		soc_type = QCA_ROME;
	}

	return soc_type;
}

static const char *qca_get_firmware_name(struct hci_uart *hu)
{
	if (hu->serdev) {
		struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);

		return qsd->firmware_name;
	} else {
		return NULL;
	}
}

static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be manually enabled
	 * on the host side to save power. Add the UART clock control code
	 * here if your platform needs it.
	 */
}

static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipsets require the UART clock to be manually disabled
	 * on the host side to save power. Add the UART clock control code
	 * here if your platform needs it.
	 */
}

/* serial_clock_vote needs to be called with the ibs lock held */
static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	unsigned int diff;

	bool old_vote = (qca->tx_vote | qca->rx_vote);
	bool new_vote;

	switch (vote) {
	case HCI_IBS_VOTE_STATS_UPDATE:
		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (old_vote)
			qca->vote_off_ms += diff;
		else
			qca->vote_on_ms += diff;
		return;

	case HCI_IBS_TX_VOTE_CLOCK_ON:
		qca->tx_vote = true;
		qca->tx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_ON:
		qca->rx_vote = true;
		qca->rx_votes_on++;
		new_vote = true;
		break;

	case HCI_IBS_TX_VOTE_CLOCK_OFF:
		qca->tx_vote = false;
		qca->tx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	case HCI_IBS_RX_VOTE_CLOCK_OFF:
		qca->rx_vote = false;
		qca->rx_votes_off++;
		new_vote = qca->rx_vote | qca->tx_vote;
		break;

	default:
		BT_ERR("Voting irregularity");
		return;
	}

	if (new_vote != old_vote) {
		if (new_vote)
			__serial_clock_on(hu->tty);
		else
			__serial_clock_off(hu->tty);

		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
		       vote ? "true" : "false");

		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);

		if (new_vote) {
			qca->votes_on++;
			qca->vote_off_ms += diff;
		} else {
			qca->votes_off++;
			qca->vote_on_ms += diff;
		}
		qca->vote_last_jif = jiffies;
	}
}

/* Builds and sends an HCI_IBS command packet.
 * These are very simple packets with only 1 cmd byte.
 */
static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
{
	int err = 0;
	struct sk_buff *skb = NULL;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);

	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Failed to allocate memory for HCI_IBS packet");
		return -ENOMEM;
	}

	/* Assign HCI_IBS type */
	skb_put_u8(skb, cmd);

	skb_queue_tail(&qca->txq, skb);

	return err;
}

static void qca_wq_awake_device(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_device);
	struct hci_uart *hu = qca->hu;
	unsigned long retrans_delay;
	unsigned long flags;

	BT_DBG("hu %p wq awake device", hu);

	/* Vote for serial clock */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Send wake indication to device */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
		BT_ERR("Failed to send WAKE to device");

	qca->ibs_sent_wakes++;

	/* Start retransmit timer */
	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_awake_rx(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_awake_rx);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p wq awake rx", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;

	/* Always acknowledge device wake up,
	 * sending IBS message doesn't count as TX ON.
	 */
	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
		BT_ERR("Failed to acknowledge device wake up");

	qca->ibs_sent_wacks++;

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_rx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p rx clock vote off", hu);

	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
}

static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);
	struct hci_uart *hu = qca->hu;

	BT_DBG("hu %p tx clock vote off", hu);

	/* Run HCI tx handling unlocked */
	hci_uart_tx_wakeup(hu);

	/* Now that the message is queued to the tty driver, vote for the
	 * tty clocks to go off. It is up to the tty driver to keep the
	 * clock-off request pending until TX is done.
	 */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}

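/* Fires once the host TX side has been idle for tx_idle_delay ms: sends
 * HCI_IBS_SLEEP_IND and votes the TX clock off via the workqueue.
 */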
static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
		/* Fall through */

	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

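/* Retransmits HCI_IBS_WAKE_IND every wake_retrans ms until the controller
 * acknowledges the wake-up (see device_woke_up()).
 */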
static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
	struct hci_uart *hu = qca->hu;
	unsigned long flags, retrans_delay;
	bool retransmit = false;

	BT_DBG("hu %p wake retransmit timeout in %d state",
	       hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	/* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		/* No WAKE_ACK, retransmit WAKE */
		retransmit = true;
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("Failed to send WAKE to device");
			break;
		}
		qca->ibs_sent_wakes++;
		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_AWAKE:
		/* Fall through */

	default:
		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (retransmit)
		hci_uart_tx_wakeup(hu);
}

static void hci_memdump_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, memdump_timer);
	struct hci_uart *hu = qca->hu;
	struct qca_memdump_data *qca_memdump = qca->qca_memdump;
	char *memdump_buf = qca_memdump->memdump_buf_head;

	bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
	/* Inject hw error event to reset the device and driver. */
	hci_reset_dev(hu->hdev);
	/* The dump buffer is vmalloc()ed in qca_controller_memdump() */
	vfree(memdump_buf);
	kfree(qca_memdump);
	qca->qca_memdump = NULL;
	qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
	del_timer(&qca->memdump_timer);
	cancel_work_sync(&qca->ctrl_memdump_evt);
}

/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca;
	int ret;

	BT_DBG("hu %p qca_open", hu);

	if (!hci_uart_has_flow_control(hu))
		return -EOPNOTSUPP;

	qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	skb_queue_head_init(&qca->rx_memdump_q);
	spin_lock_init(&qca->hci_ibs_lock);
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
	INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
	init_waitqueue_head(&qca->suspend_wait_q);

	qca->hu = hu;
	init_completion(&qca->drop_ev_comp);

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	qca->vote_last_jif = jiffies;

	hu->priv = qca;

	if (hu->serdev) {
		qcadev = serdev_device_get_drvdata(hu->serdev);
		if (!qca_is_wcn399x(qcadev->btsoc_type)) {
			gpiod_set_value_cansleep(qcadev->bt_en, 1);
			/* Controller needs time to boot up. */
			msleep(150);
		} else {
			hu->init_speed = qcadev->init_speed;
			hu->oper_speed = qcadev->oper_speed;
			ret = qca_regulator_enable(qcadev);
			if (ret) {
				destroy_workqueue(qca->workqueue);
				kfree_skb(qca->rx_skb);
				hu->priv = NULL;
				kfree(qca);
				return ret;
			}
		}
	}

	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
	qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
	timer_setup(&qca->memdump_timer, hci_memdump_timeout, 0);

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}

static void qca_debugfs_init(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct dentry *ibs_dir;
	umode_t mode;

	if (!hdev->debugfs)
		return;

	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);

	/* read only */
	mode = S_IRUGO;
	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
			   &qca->ibs_sent_slps);
	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
			   &qca->ibs_sent_wakes);
	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
			   &qca->ibs_sent_wacks);
	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
			   &qca->ibs_recv_slps);
	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
			   &qca->ibs_recv_wakes);
	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
			   &qca->ibs_recv_wacks);
	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);

	/* read/write */
	mode = S_IRUGO | S_IWUSR;
	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
			   &qca->tx_idle_delay);
}

/* Flush protocol data */
static int qca_flush(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca flush", hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);

	return 0;
}

/* Close protocol */
static int qca_close(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca close", hu);

	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);

	skb_queue_purge(&qca->tx_wait_q);
	skb_queue_purge(&qca->txq);
	skb_queue_purge(&qca->rx_memdump_q);
	del_timer(&qca->tx_idle_timer);
	del_timer(&qca->wake_retrans_timer);
	del_timer(&qca->memdump_timer);
	destroy_workqueue(qca->workqueue);
	qca->hu = NULL;

	if (hu->serdev) {
		qcadev = serdev_device_get_drvdata(hu->serdev);
		if (qca_is_wcn399x(qcadev->btsoc_type))
			qca_power_shutdown(hu);
		else
			gpiod_set_value_cansleep(qcadev->bt_en, 0);
	}

	kfree_skb(qca->rx_skb);

	hu->priv = NULL;

	kfree(qca);

	return 0;
}

/* Called upon a wake-up-indication from the device.
 */
static void device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to wake up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wakes++;

	/* Don't wake the rx up when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_ASLEEP:
		/* Make sure the clock is on - we may have turned it off
		 * since receiving the last wake-up indication. Wake the RX
		 * side up from the workqueue.
		 */
		queue_work(qca->workqueue, &qca->ws_awake_rx);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;

	case HCI_IBS_RX_AWAKE:
		/* Always acknowledge device wake up,
		 * sending IBS message doesn't count as TX ON.
		 */
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wacks++;
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Called upon a sleep-indication from the device.
 */
static void device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_slps++;

	switch (qca->rx_ibs_state) {
	case HCI_IBS_RX_AWAKE:
		/* Update state */
		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
		/* Vote off rx clock under workqueue */
		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
		break;

	case HCI_IBS_RX_ASLEEP:
		break;

	default:
		/* Any other state is illegal */
		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
		       qca->rx_ibs_state);
		break;
	}

	wake_up_interruptible(&qca->suspend_wait_q);

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}

/* Called upon wake-up-acknowledgement from the device
 */
static void device_woke_up(struct hci_uart *hu)
{
	unsigned long flags, idle_delay;
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb = NULL;

	BT_DBG("hu %p woke up", hu);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	qca->ibs_recv_wacks++;

	/* Don't react to the wake-up-acknowledgment when suspending. */
	if (test_bit(QCA_SUSPENDING, &qca->flags)) {
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return;
	}

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* Expect one if we send 2 WAKEs */
		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;

	case HCI_IBS_TX_WAKING:
		/* Send pending packets */
		while ((skb = skb_dequeue(&qca->tx_wait_q)))
			skb_queue_tail(&qca->txq, skb);

		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
		del_timer(&qca->wake_retrans_timer);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
		break;

	case HCI_IBS_TX_ASLEEP:
		/* Fall through */

	default:
		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
		       qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	/* Actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Enqueue frame for transmission (padding, crc, etc); may be called from
 * two simultaneous tasklets.
 */
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0, idle_delay;
	struct qca_data *qca = hu->priv;

	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
	       qca->tx_ibs_state);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

	/* Don't go to sleep in the middle of a patch download, or when
	 * Out-Of-Band (GPIO-controlled) sleep is selected.
	 * Don't wake the device up when suspending.
	 */
	if (!test_bit(QCA_IBS_ENABLED, &qca->flags) ||
	    test_bit(QCA_SUSPENDING, &qca->flags)) {
		skb_queue_tail(&qca->txq, skb);
		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
		return 0;
	}

	/* Act according to current state */
	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		BT_DBG("Device awake, sending normally");
		skb_queue_tail(&qca->txq, skb);
		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
		BT_DBG("Device asleep, waking up and queueing packet");
		/* Save packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);

		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
		/* Schedule a work queue to wake up device */
		queue_work(qca->workqueue, &qca->ws_awake_device);
		break;

	case HCI_IBS_TX_WAKING:
		BT_DBG("Device waking up, queueing packet");
		/* Transient state; just keep packet for later */
		skb_queue_tail(&qca->tx_wait_q, skb);
		break;

	default:
		BT_ERR("Illegal tx state: %d (losing packet)",
		       qca->tx_ibs_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	return 0;
}

static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);

	device_want_to_sleep(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);

	device_want_to_wakeup(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);

	device_woke_up(hu);

	kfree_skb(skb);
	return 0;
}

static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* We receive debug logs from the chip as ACL packets. Instead of
	 * routing them through the ACL layer to be decoded, push them up
	 * to the higher layers as diagnostic packets.
	 */
	if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE)
		return hci_recv_diag(hdev, skb);

	return hci_recv_frame(hdev, skb);
}

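/* Workqueue handler that reassembles the dump fragments queued by
 * qca_controller_memdump_event(): it allocates the dump buffer on the
 * first fragment, zero-fills gaps left by lost fragments, and hands the
 * completed dump to the devcoredump framework.
 */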
static void qca_controller_memdump(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ctrl_memdump_evt);
	struct hci_uart *hu = qca->hu;
	struct sk_buff *skb;
	struct qca_memdump_event_hdr *cmd_hdr;
	struct qca_memdump_data *qca_memdump = qca->qca_memdump;
	struct qca_dump_size *dump;
	char *memdump_buf;
	char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
	u16 opcode, seq_no;
	u32 dump_size;

	while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
		if (!qca_memdump) {
			qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
					      GFP_ATOMIC);
			if (!qca_memdump) {
				kfree_skb(skb);
				return;
			}

			qca->qca_memdump = qca_memdump;
		}

		qca->memdump_state = QCA_MEMDUMP_COLLECTING;
		cmd_hdr = (void *) skb->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		seq_no = __le16_to_cpu(cmd_hdr->seq_no);
		skb_pull(skb, sizeof(struct qca_memdump_event_hdr));

		if (!seq_no) {
			/* This is the first frame of the memdump packet from
			 * the controller. Disable IBS so the dump is received
			 * without interruption; the controller needs roughly
			 * 8 seconds to send the complete dump, so start a
			 * timer to bound this asynchronous activity.
			 */
			clear_bit(QCA_IBS_ENABLED, &qca->flags);
			set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
			dump = (void *) skb->data;
			dump_size = __le32_to_cpu(dump->dump_size);
			if (!dump_size) {
				bt_dev_err(hu->hdev, "Rx invalid memdump size");
				kfree_skb(skb);
				return;
			}

			bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
				    dump_size);
			mod_timer(&qca->memdump_timer, (jiffies +
				  msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)));

			skb_pull(skb, sizeof(dump_size));
			memdump_buf = vmalloc(dump_size);
			qca_memdump->memdump_buf_head = memdump_buf;
			qca_memdump->memdump_buf_tail = memdump_buf;
		}

		memdump_buf = qca_memdump->memdump_buf_tail;

		/* If sequence number 0 was missed then there is no point in
		 * accepting the remaining fragments.
		 */
		if (!memdump_buf) {
			bt_dev_err(hu->hdev, "QCA: Discarding other packets");
			kfree(qca_memdump);
			kfree_skb(skb);
			qca->qca_memdump = NULL;
			return;
		}

		/* Some fragments from the controller may be lost on their
		 * way to the host. In that case, pad the buffer with dummy
		 * (zeroed) packets for the missing sequence numbers.
		 */
		while ((seq_no > qca_memdump->current_seq_no + 1) &&
		       seq_no != QCA_LAST_SEQUENCE_NUM) {
			bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
				   qca_memdump->current_seq_no);
			memcpy(memdump_buf, nullBuff, QCA_DUMP_PACKET_SIZE);
			memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
			qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
			qca_memdump->current_seq_no++;
		}

		memcpy(memdump_buf, (unsigned char *) skb->data, skb->len);
		memdump_buf = memdump_buf + skb->len;
		qca_memdump->memdump_buf_tail = memdump_buf;
		qca_memdump->current_seq_no = seq_no + 1;
		qca_memdump->received_dump += skb->len;
		qca->qca_memdump = qca_memdump;
		kfree_skb(skb);
		if (seq_no == QCA_LAST_SEQUENCE_NUM) {
			bt_dev_info(hu->hdev, "QCA writing crash dump of size %d bytes",
				    qca_memdump->received_dump);
			memdump_buf = qca_memdump->memdump_buf_head;
			dev_coredumpv(&hu->serdev->dev, memdump_buf,
				      qca_memdump->received_dump, GFP_KERNEL);
			del_timer(&qca->memdump_timer);
			kfree(qca->qca_memdump);
			qca->qca_memdump = NULL;
			qca->memdump_state = QCA_MEMDUMP_COLLECTED;
		}
	}
}

static int qca_controller_memdump_event(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	skb_queue_tail(&qca->rx_memdump_q, skb);
	queue_work(qca->workqueue, &qca->ctrl_memdump_evt);

	return 0;
}

static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
		struct hci_event_hdr *hdr = (void *)skb->data;

		/* For the WCN3990 the vendor command for a baudrate change
		 * isn't sent as synchronous HCI command, because the
		 * controller sends the corresponding vendor event with the
		 * new baudrate. The event is received and properly decoded
		 * after changing the baudrate of the host port. It needs to
		 * be dropped, otherwise it can be misinterpreted as
		 * response to a later firmware download command (also a
		 * vendor command).
		 */

		if (hdr->evt == HCI_EV_VENDOR)
			complete(&qca->drop_ev_comp);

		kfree_skb(skb);

		return 0;
	}

	/* We receive a chip memory dump as an event packet with a dedicated
	 * handler, followed by a hardware error event. When this event is
	 * received we store the dump to a file before closing hci. The dump
	 * helps in triaging the issues.
	 */
	if ((skb->data[0] == HCI_VENDOR_PKT) &&
	    (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE))
		return qca_controller_memdump_event(hdev, skb);

	return hci_recv_frame(hdev, skb);
}

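/* h4_recv_buf() descriptors for the IBS messages: these are bare one-byte
 * type codes, so the header length, length-field offset and size are all
 * zero and the packet completes immediately.
 */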
#define QCA_IBS_SLEEP_IND_EVENT \
	.type = HCI_IBS_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_IND_EVENT \
	.type = HCI_IBS_WAKE_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

#define QCA_IBS_WAKE_ACK_EVENT \
	.type = HCI_IBS_WAKE_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = HCI_MAX_IBS_SIZE

static const struct h4_recv_pkt qca_recv_pkts[] = {
	{ H4_RECV_ACL,             .recv = qca_recv_acl_data },
	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
	{ H4_RECV_EVENT,           .recv = qca_recv_event    },
	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
};

static int qca_recv(struct hci_uart *hu, const void *data, int count)
{
	struct qca_data *qca = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
	if (IS_ERR(qca->rx_skb)) {
		int err = PTR_ERR(qca->rx_skb);
		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
		qca->rx_skb = NULL;
		return err;
	}

	return count;
}

static struct sk_buff *qca_dequeue(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;

	return skb_dequeue(&qca->txq);
}

static uint8_t qca_get_baudrate_value(int speed)
{
	switch (speed) {
	case 9600:
		return QCA_BAUDRATE_9600;
	case 19200:
		return QCA_BAUDRATE_19200;
	case 38400:
		return QCA_BAUDRATE_38400;
	case 57600:
		return QCA_BAUDRATE_57600;
	case 115200:
		return QCA_BAUDRATE_115200;
	case 230400:
		return QCA_BAUDRATE_230400;
	case 460800:
		return QCA_BAUDRATE_460800;
	case 500000:
		return QCA_BAUDRATE_500000;
	case 921600:
		return QCA_BAUDRATE_921600;
	case 1000000:
		return QCA_BAUDRATE_1000000;
	case 2000000:
		return QCA_BAUDRATE_2000000;
	case 3000000:
		return QCA_BAUDRATE_3000000;
	case 3200000:
		return QCA_BAUDRATE_3200000;
	case 3500000:
		return QCA_BAUDRATE_3500000;
	default:
		return QCA_BAUDRATE_115200;
	}
}

static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;
	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
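	/* Raw H4 frame: 0x01 is the command packet indicator, 0x48 0xFC the
	 * vendor set-baudrate opcode (0xfc48), 0x01 the parameter length;
	 * cmd[4] carries the baudrate code. The skb is queued on txq
	 * directly, bypassing qca_enqueue(), so the type byte stays in the
	 * buffer.
	 */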

	if (baudrate > QCA_BAUDRATE_3200000)
		return -EINVAL;

	cmd[4] = baudrate;

	skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hdev, "Failed to allocate baudrate packet");
		return -ENOMEM;
	}

	/* Assign commands to change baudrate and packet type. */
	skb_put_data(skb, cmd, sizeof(cmd));
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;

	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	/* Wait for the baudrate change request to be sent */
	while (!skb_queue_empty(&qca->txq))
		usleep_range(100, 200);

	if (hu->serdev)
		serdev_device_wait_until_sent(hu->serdev,
					      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));

	/* Give the controller time to process the request */
	if (qca_is_wcn399x(qca_soc_type(hu)))
		msleep(10);
	else
		msleep(300);

	return 0;
}

static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
{
	if (hu->serdev)
		serdev_device_set_baudrate(hu->serdev, speed);
	else
		hci_uart_set_baudrate(hu, speed);
}

static int qca_send_power_pulse(struct hci_uart *hu, bool on)
{
	int ret;
	int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
	u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE;

	/* These power pulses are single-byte commands sent to the wcn3990
	 * at the required baudrate. On the wcn3990, an external circuit at
	 * the Tx pin decodes a pulse sent at a specific baudrate. For
	 * example, the wcn3990 supports an RF COEX antenna for both Wi-Fi
	 * and BT, and the same power inputs are used to turn both on and
	 * off. Powering up the power sources will not enable BT until we
	 * send a power-on pulse at 115200 bps. This algorithm helps to save
	 * power. Disabling hardware flow control is mandatory while sending
	 * power pulses to the SoC.
	 */
	bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd);

	serdev_device_write_flush(hu->serdev);
	hci_uart_set_flow_control(hu, true);
	ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
	if (ret < 0) {
		bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd);
		return ret;
	}

	serdev_device_wait_until_sent(hu->serdev, timeout);
	hci_uart_set_flow_control(hu, false);

	/* Give the controller time to boot up or shut down */
	if (on)
		msleep(100);
	else
		msleep(10);

	return 0;
}

static unsigned int qca_get_speed(struct hci_uart *hu,
				  enum qca_speed_type speed_type)
{
	unsigned int speed = 0;

	if (speed_type == QCA_INIT_SPEED) {
		if (hu->init_speed)
			speed = hu->init_speed;
		else if (hu->proto->init_speed)
			speed = hu->proto->init_speed;
	} else {
		if (hu->oper_speed)
			speed = hu->oper_speed;
		else if (hu->proto->oper_speed)
			speed = hu->proto->oper_speed;
	}

	return speed;
}

static int qca_check_speeds(struct hci_uart *hu)
{
	if (qca_is_wcn399x(qca_soc_type(hu))) {
		if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
		    !qca_get_speed(hu, QCA_OPER_SPEED))
			return -EINVAL;
	} else {
		if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
		    !qca_get_speed(hu, QCA_OPER_SPEED))
			return -EINVAL;
	}

	return 0;
}

static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
{
	unsigned int speed, qca_baudrate;
	struct qca_data *qca = hu->priv;
	int ret = 0;

	if (speed_type == QCA_INIT_SPEED) {
		speed = qca_get_speed(hu, QCA_INIT_SPEED);
		if (speed)
			host_set_baudrate(hu, speed);
	} else {
		enum qca_btsoc_type soc_type = qca_soc_type(hu);

		speed = qca_get_speed(hu, QCA_OPER_SPEED);
		if (!speed)
			return 0;

		/* Disable flow control for wcn3990 to deassert RTS while
		 * changing the baudrate of chip and host.
		 */
		if (qca_is_wcn399x(soc_type))
			hci_uart_set_flow_control(hu, true);

		if (soc_type == QCA_WCN3990) {
			reinit_completion(&qca->drop_ev_comp);
			set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
		}

		qca_baudrate = qca_get_baudrate_value(speed);
		bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
		ret = qca_set_baudrate(hu->hdev, qca_baudrate);
		if (ret)
			goto error;

		host_set_baudrate(hu, speed);

error:
		if (qca_is_wcn399x(soc_type))
			hci_uart_set_flow_control(hu, false);

		if (soc_type == QCA_WCN3990) {
			/* Wait for the controller to send the vendor event
			 * for the baudrate change command.
			 */
			if (!wait_for_completion_timeout(&qca->drop_ev_comp,
							 msecs_to_jiffies(100))) {
				bt_dev_err(hu->hdev,
					   "Failed to change controller baudrate");
				ret = -ETIMEDOUT;
			}

			clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
		}
	}

	return ret;
}

static int qca_send_crashbuffer(struct hci_uart *hu)
{
	struct qca_data *qca = hu->priv;
	struct sk_buff *skb;

	skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL);
	if (!skb) {
		bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet");
		return -ENOMEM;
	}

	/* We forcefully crash the controller by sending the 0xFB byte 1024
	 * times. Since there is a chance of losing some bytes on the way,
	 * we send 1096 bytes to the SoC to be on the safe side.
	 */
	memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE,
	       QCA_CRASHBYTE_PACKET_LEN);
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	bt_dev_info(hu->hdev, "crash the soc to collect controller dump");
	skb_queue_tail(&qca->txq, skb);
	hci_uart_tx_wakeup(hu);

	return 0;
}

static void qca_wait_for_dump_collection(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	struct qca_memdump_data *qca_memdump = qca->qca_memdump;
	char *memdump_buf = NULL;

	wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
			    TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);

	clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
		bt_dev_err(hu->hdev, "Clearing the buffers due to timeout");
		if (qca_memdump)
			memdump_buf = qca_memdump->memdump_buf_head;
		/* The dump buffer is vmalloc()ed in qca_controller_memdump() */
		vfree(memdump_buf);
		kfree(qca_memdump);
		qca->qca_memdump = NULL;
		qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
		del_timer(&qca->memdump_timer);
		cancel_work_sync(&qca->ctrl_memdump_evt);
	}
}

static void qca_hw_error(struct hci_dev *hdev, u8 code)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);

	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
		/* If the hardware error event was received for something
		 * other than a QCA SoC memory dump event, we need to crash
		 * the SoC and wait up to 8 seconds here for the dump
		 * packets. This blocks the calling thread until the dump
		 * is collected.
		 */
		set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
		qca_send_crashbuffer(hu);
		qca_wait_for_dump_collection(hdev);
	} else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
		/* Wait here until the memory dump is collected or the
		 * memory dump timer expires.
		 */
		bt_dev_info(hdev, "waiting for dump to complete");
		qca_wait_for_dump_collection(hdev);
	}
}

static void qca_cmd_timeout(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	if (qca->memdump_state == QCA_MEMDUMP_IDLE)
		qca_send_crashbuffer(hu);
	else
		bt_dev_info(hdev, "Dump collection is in process");
}

static int qca_wcn3990_init(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	int ret;

	/* Check the regulator status: hci down may have turned off the
	 * voltage regulators.
	 */
	qcadev = serdev_device_get_drvdata(hu->serdev);
	if (!qcadev->bt_power->vregs_on) {
		serdev_device_close(hu->serdev);
		ret = qca_regulator_enable(qcadev);
		if (ret)
			return ret;

		ret = serdev_device_open(hu->serdev);
		if (ret) {
			bt_dev_err(hu->hdev, "failed to open port");
			return ret;
		}
	}

	/* Forcefully put the wcn3990 into boot mode. */
	host_set_baudrate(hu, 2400);
	ret = qca_send_power_pulse(hu, false);
	if (ret)
		return ret;

	qca_set_speed(hu, QCA_INIT_SPEED);
	ret = qca_send_power_pulse(hu, true);
	if (ret)
		return ret;

	/* The device is now ready to communicate with the host. To get the
	 * host back in sync with the device, the port has to be reopened;
	 * without this there are RTS and CTS synchronization issues.
	 */
	serdev_device_close(hu->serdev);
	ret = serdev_device_open(hu->serdev);
	if (ret) {
		bt_dev_err(hu->hdev, "failed to open port");
		return ret;
	}

	hci_uart_set_flow_control(hu, false);

	return 0;
}

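/* At a glance, the bring-up implemented above (a summary of this function,
 * not an authoritative description of the wcn3990 boot protocol):
 *
 *   1. make sure the SoC regulators are on, re-enabling them after hci down;
 *   2. at 2400 baud, send a power-off pulse to force the SoC into boot mode;
 *   3. switch to the init speed and send a power-on pulse;
 *   4. close and reopen the serdev port to resynchronize RTS/CTS.
 */
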
static int qca_setup(struct hci_uart *hu)
{
	struct hci_dev *hdev = hu->hdev;
	struct qca_data *qca = hu->priv;
	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
	enum qca_btsoc_type soc_type = qca_soc_type(hu);
	const char *firmware_name = qca_get_firmware_name(hu);
	int ret;
	int soc_ver = 0;

	ret = qca_check_speeds(hu);
	if (ret)
		return ret;

	/* Patch downloading has to be done without IBS mode */
	clear_bit(QCA_IBS_ENABLED, &qca->flags);

	/* Enable controller to do both LE scan and BR/EDR inquiry
	 * simultaneously.
	 */
	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);

	if (qca_is_wcn399x(soc_type)) {
		bt_dev_info(hdev, "setting up wcn3990");

		/* Enable the NON_PERSISTENT_SETUP quirk so that setup is
		 * executed on every hci up.
		 */
		set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
		set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
		hu->hdev->shutdown = qca_power_off;
		ret = qca_wcn3990_init(hu);
		if (ret)
			return ret;

		ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
		if (ret)
			return ret;
	} else {
		bt_dev_info(hdev, "ROME setup");
		qca_set_speed(hu, QCA_INIT_SPEED);
	}

	/* Setup user speed if needed */
	speed = qca_get_speed(hu, QCA_OPER_SPEED);
	if (speed) {
		ret = qca_set_speed(hu, QCA_OPER_SPEED);
		if (ret)
			return ret;

		qca_baudrate = qca_get_baudrate_value(speed);
	}

	if (!qca_is_wcn399x(soc_type)) {
		/* Get QCA version information */
		ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
		if (ret)
			return ret;
	}

	bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
	/* Setup patch / NVM configurations */
	ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver,
			     firmware_name);
	if (!ret) {
		set_bit(QCA_IBS_ENABLED, &qca->flags);
		qca_debugfs_init(hdev);
		hu->hdev->hw_error = qca_hw_error;
		hu->hdev->cmd_timeout = qca_cmd_timeout;
	} else if (ret == -ENOENT) {
		/* No patch/nvm-config found, run with original fw/config */
		ret = 0;
	} else if (ret == -EAGAIN) {
		/*
		 * Userspace firmware loader will return -EAGAIN in case no
		 * patch/nvm-config is found, so run with original fw/config.
		 */
		ret = 0;
	}

	/* Setup bdaddr */
	if (qca_is_wcn399x(soc_type))
		hu->hdev->set_bdaddr = qca_set_bdaddr;
	else
		hu->hdev->set_bdaddr = qca_set_bdaddr_rome;

	return ret;
}

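/* A rough sketch of the control flow above, for orientation only:
 *
 *   qca_setup()
 *     qca_check_speeds()              validate init/operating speeds
 *     clear QCA_IBS_ENABLED           no in-band sleep while downloading
 *     qca_wcn3990_init()              wcn399x only: power-on sequence
 *     qca_set_speed(QCA_OPER_SPEED)   switch to the operating baudrate
 *     qca_read_soc_version()          before or after the speed change,
 *                                     depending on the SoC type
 *     qca_uart_setup()                download patch and NVM configuration
 *     set QCA_IBS_ENABLED, install hw_error/cmd_timeout handlers
 */
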
static const struct hci_uart_proto qca_proto = {
	.id		= HCI_UART_QCA,
	.name		= "QCA",
	.manufacturer	= 29,
	.init_speed	= 115200,
	.oper_speed	= 3000000,
	.open		= qca_open,
	.close		= qca_close,
	.flush		= qca_flush,
	.setup		= qca_setup,
	.recv		= qca_recv,
	.enqueue	= qca_enqueue,
	.dequeue	= qca_dequeue,
};

static const struct qca_vreg_data qca_soc_data_wcn3990 = {
	.soc_type = QCA_WCN3990,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000 },
		{ "vddxo", 80000 },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
};

static const struct qca_vreg_data qca_soc_data_wcn3991 = {
	.soc_type = QCA_WCN3991,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 15000 },
		{ "vddxo", 80000 },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
};

static const struct qca_vreg_data qca_soc_data_wcn3998 = {
	.soc_type = QCA_WCN3998,
	.vregs = (struct qca_vreg []) {
		{ "vddio", 10000 },
		{ "vddxo", 80000 },
		{ "vddrf", 300000 },
		{ "vddch0", 450000 },
	},
	.num_vregs = 4,
};

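/* In the tables above, the number next to each supply name is the load the
 * SoC is expected to put on that regulator, in microamps;
 * qca_init_regulators() below passes it to regulator_set_load().
 */
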
static void qca_power_shutdown(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca = hu->priv;
	unsigned long flags;

	qcadev = serdev_device_get_drvdata(hu->serdev);

	/* From this point on we go into the power-off state, but the serial
	 * port is still open: stop queueing IBS data and flush all the
	 * buffered data held in skbs.
	 */
	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
	clear_bit(QCA_IBS_ENABLED, &qca->flags);
	qca_flush(hu);
	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	host_set_baudrate(hu, 2400);
	qca_send_power_pulse(hu, false);
	qca_regulator_disable(qcadev);
	hu->hdev->hw_error = NULL;
	hu->hdev->cmd_timeout = NULL;
}

static int qca_power_off(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	/* Don't bother sending the pre-shutdown command if the SoC has
	 * crashed.
	 */
	if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
		qca_send_pre_shutdown_cmd(hdev);
		usleep_range(8000, 10000);
	}

	qca->memdump_state = QCA_MEMDUMP_IDLE;
	qca_power_shutdown(hu);
	return 0;
}

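/* Ordering note: the pre-shutdown command is only useful while the SoC is
 * still alive, so it is skipped after a crash; the rails themselves are then
 * dropped in qca_power_shutdown() via a power-off pulse at 2400 baud
 * followed by qca_regulator_disable().
 */
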
static int qca_regulator_enable(struct qca_serdev *qcadev)
{
	struct qca_power *power = qcadev->bt_power;
	int ret;

	/* Already enabled */
	if (power->vregs_on)
		return 0;

	BT_DBG("enabling %d regulators", power->num_vregs);

	ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk);
	if (ret)
		return ret;

	power->vregs_on = true;

	return 0;
}

static void qca_regulator_disable(struct qca_serdev *qcadev)
{
	struct qca_power *power;

	if (!qcadev)
		return;

	power = qcadev->bt_power;

	/* Already disabled? */
	if (!power->vregs_on)
		return;

	regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
	power->vregs_on = false;
}

static int qca_init_regulators(struct qca_power *qca,
			       const struct qca_vreg *vregs, size_t num_vregs)
{
	struct regulator_bulk_data *bulk;
	int ret;
	int i;

	bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	for (i = 0; i < num_vregs; i++)
		bulk[i].supply = vregs[i].name;

	ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
	if (ret < 0)
		return ret;

	for (i = 0; i < num_vregs; i++) {
		ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
		if (ret)
			return ret;
	}

	qca->vreg_bulk = bulk;
	qca->num_vregs = num_vregs;

	return 0;
}

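/* Everything allocated above is devm-managed (devm_kcalloc() and
 * devm_regulator_bulk_get()), so there is no explicit teardown path: the
 * bulk data and the regulator handles are released automatically when the
 * underlying serdev device goes away.
 */
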
static int qca_serdev_probe(struct serdev_device *serdev)
{
	struct qca_serdev *qcadev;
	const struct qca_vreg_data *data;
	int err;

	qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
	if (!qcadev)
		return -ENOMEM;

	qcadev->serdev_hu.serdev = serdev;
	data = device_get_match_data(&serdev->dev);
	serdev_device_set_drvdata(serdev, qcadev);
	device_property_read_string(&serdev->dev, "firmware-name",
				    &qcadev->firmware_name);
	if (data && qca_is_wcn399x(data->soc_type)) {
		qcadev->btsoc_type = data->soc_type;
		qcadev->bt_power = devm_kzalloc(&serdev->dev,
						sizeof(struct qca_power),
						GFP_KERNEL);
		if (!qcadev->bt_power)
			return -ENOMEM;

		qcadev->bt_power->dev = &serdev->dev;
		err = qca_init_regulators(qcadev->bt_power, data->vregs,
					  data->num_vregs);
		if (err) {
			BT_ERR("Failed to init regulators: %d", err);
			goto out;
		}

		qcadev->bt_power->vregs_on = false;

		device_property_read_u32(&serdev->dev, "max-speed",
					 &qcadev->oper_speed);
		if (!qcadev->oper_speed)
			BT_DBG("UART will pick default operating speed");

		err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
		if (err) {
			BT_ERR("wcn3990 serdev registration failed");
			goto out;
		}
	} else {
		qcadev->btsoc_type = QCA_ROME;
		qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
					       GPIOD_OUT_LOW);
		if (IS_ERR(qcadev->bt_en)) {
			dev_err(&serdev->dev, "failed to acquire enable gpio\n");
			return PTR_ERR(qcadev->bt_en);
		}

		qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
		if (IS_ERR(qcadev->susclk)) {
			dev_err(&serdev->dev, "failed to acquire clk\n");
			return PTR_ERR(qcadev->susclk);
		}

		err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
		if (err)
			return err;

		err = clk_prepare_enable(qcadev->susclk);
		if (err)
			return err;

		err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
		if (err)
			clk_disable_unprepare(qcadev->susclk);
	}

out:
	return err;
}

static void qca_serdev_remove(struct serdev_device *serdev)
{
	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);

	if (qca_is_wcn399x(qcadev->btsoc_type))
		qca_power_shutdown(&qcadev->serdev_hu);
	else
		clk_disable_unprepare(qcadev->susclk);

	hci_uart_unregister_device(&qcadev->serdev_hu);
}

static int __maybe_unused qca_suspend(struct device *dev)
{
	struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;
	unsigned long flags;
	int ret = 0;
	u8 cmd;

	set_bit(QCA_SUSPENDING, &qca->flags);

	/* The device is downloading a patch or doesn't support in-band
	 * sleep.
	 */
	if (!test_bit(QCA_IBS_ENABLED, &qca->flags))
		return 0;

	cancel_work_sync(&qca->ws_awake_device);
	cancel_work_sync(&qca->ws_awake_rx);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		del_timer(&qca->wake_retrans_timer);
		/* Fall through */
	case HCI_IBS_TX_AWAKE:
		del_timer(&qca->tx_idle_timer);

		serdev_device_write_flush(hu->serdev);
		cmd = HCI_IBS_SLEEP_IND;
		ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));

		if (ret < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}

		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;

		qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
		break;

	default:
		BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
		ret = -EINVAL;
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (ret < 0)
		goto error;

	serdev_device_wait_until_sent(hu->serdev,
				      msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));

	/* Wait for the HCI_IBS_SLEEP_IND sent by the device to indicate that
	 * its Tx path is going to sleep, so that no late packet wakes the
	 * system up later.
	 */
	ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
			qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
			msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));

	if (ret > 0)
		return 0;

	if (ret == 0)
		ret = -ETIMEDOUT;

error:
	clear_bit(QCA_SUSPENDING, &qca->flags);

	return ret;
}

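/* The suspend path above follows the in-band sleep convention used
 * throughout this driver (a descriptive sketch of the code, not a formal
 * protocol specification):
 *
 *   host                                controller
 *    |----- HCI_IBS_SLEEP_IND ------------->|   host Tx goes to sleep
 *    |<---- HCI_IBS_SLEEP_IND --------------|   host Rx goes to sleep
 *
 * Suspend only completes once both directions are asleep; the
 * wait_event_interruptible_timeout() above covers the second leg.
 */
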
static int __maybe_unused qca_resume(struct device *dev)
{
	struct hci_dev *hdev = container_of(dev, struct hci_dev, dev);
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct qca_data *qca = hu->priv;

	clear_bit(QCA_SUSPENDING, &qca->flags);

	return 0;
}

static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);

static const struct of_device_id qca_bluetooth_of_match[] = {
	{ .compatible = "qcom,qca6174-bt" },
	{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
	{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
	{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);

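/* For reference, a board would describe a wcn399x part with a devicetree
 * node roughly like the one below. The supply names come from the regulator
 * tables above; the phandles and the max-speed value are made-up examples,
 * and the qualcomm-bluetooth binding document remains the authoritative
 * reference.
 *
 *	bluetooth {
 *		compatible = "qcom,wcn3990-bt";
 *		vddio-supply = <&vreg_s4a_1p8>;
 *		vddxo-supply = <&vreg_l7a_1p8>;
 *		vddrf-supply = <&vreg_l17a_1p3>;
 *		vddch0-supply = <&vreg_l25a_3p3>;
 *		max-speed = <3200000>;
 *	};
 */
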
static struct serdev_device_driver qca_serdev_driver = {
	.probe = qca_serdev_probe,
	.remove = qca_serdev_remove,
	.driver = {
		.name = "hci_uart_qca",
		.of_match_table = qca_bluetooth_of_match,
		.pm = &qca_pm_ops,
	},
};

int __init qca_init(void)
{
	serdev_device_driver_register(&qca_serdev_driver);

	return hci_uart_register_proto(&qca_proto);
}

int __exit qca_deinit(void)
{
	serdev_device_driver_unregister(&qca_serdev_driver);

	return hci_uart_unregister_proto(&qca_proto);
}