blob: 5587a9926cc60bad9228b3a515ce53c085037852 [file] [log] [blame]
Govind Singh6e0355a2020-08-13 12:04:20 +03001// SPDX-License-Identifier: BSD-3-Clause-Clear
2/*
3 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/module.h>
Govind Singh5697a562020-08-13 12:04:22 +03007#include <linux/msi.h>
Govind Singh6e0355a2020-08-13 12:04:20 +03008#include <linux/pci.h>
9
Govind Singh57626132020-08-13 12:04:21 +030010#include "pci.h"
Govind Singh6e0355a2020-08-13 12:04:20 +030011#include "core.h"
Govind Singh1399fb82020-08-13 12:04:24 +030012#include "hif.h"
13#include "mhi.h"
Govind Singh6e0355a2020-08-13 12:04:20 +030014#include "debug.h"
15
Govind Singh57626132020-08-13 12:04:21 +030016#define ATH11K_PCI_BAR_NUM 0
17#define ATH11K_PCI_DMA_MASK 32
18
Govind Singh7f4beda2020-08-13 12:04:25 +030019#define ATH11K_PCI_IRQ_CE0_OFFSET 3
20
Govind Singh654e9592020-08-14 10:10:23 +030021#define WINDOW_ENABLE_BIT 0x40000000
22#define WINDOW_REG_ADDRESS 0x310c
23#define WINDOW_VALUE_MASK GENMASK(24, 19)
24#define WINDOW_START 0x80000
25#define WINDOW_RANGE_MASK GENMASK(18, 0)
26
Kalle Valo18ac1662020-09-29 20:15:32 +030027#define TCSR_SOC_HW_VERSION 0x0224
28#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8)
29#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
30
Carl Huanga05bd852020-10-01 12:34:43 +030031/* BAR0 + 4k is always accessible, and no
32 * need to force wakeup.
33 * 4K - 32 = 0xFE0
34 */
35#define ACCESS_ALWAYS_OFF 0xFE0
36
Govind Singh6e0355a2020-08-13 12:04:20 +030037#define QCA6390_DEVICE_ID 0x1101
38
39static const struct pci_device_id ath11k_pci_id_table[] = {
40 { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
41 {0}
42};
43
44MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
45
Govind Singh1ff8ed72020-08-13 12:04:26 +030046static const struct ath11k_bus_params ath11k_pci_bus_params = {
47 .mhi_support = true,
Govind Singh56970452020-08-14 10:10:20 +030048 .m3_fw_support = true,
Govind Singh6eb6ea52020-08-14 10:10:21 +030049 .fixed_bdf_addr = false,
50 .fixed_mem_region = false,
Govind Singh1ff8ed72020-08-13 12:04:26 +030051};
52
Govind Singh5697a562020-08-13 12:04:22 +030053static const struct ath11k_msi_config msi_config = {
54 .total_vectors = 32,
55 .total_users = 4,
56 .users = (struct ath11k_msi_user[]) {
57 { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
58 { .name = "CE", .num_vectors = 10, .base_vector = 3 },
59 { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
60 { .name = "DP", .num_vectors = 18, .base_vector = 14 },
61 },
62};
63
Govind Singh7f4beda2020-08-13 12:04:25 +030064static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
65 "bhi",
66 "mhi-er0",
67 "mhi-er1",
68 "ce0",
69 "ce1",
70 "ce2",
71 "ce3",
72 "ce4",
73 "ce5",
74 "ce6",
75 "ce7",
76 "ce8",
77 "ce9",
78 "ce10",
79 "ce11",
80 "host2wbm-desc-feed",
81 "host2reo-re-injection",
82 "host2reo-command",
83 "host2rxdma-monitor-ring3",
84 "host2rxdma-monitor-ring2",
85 "host2rxdma-monitor-ring1",
86 "reo2ost-exception",
87 "wbm2host-rx-release",
88 "reo2host-status",
89 "reo2host-destination-ring4",
90 "reo2host-destination-ring3",
91 "reo2host-destination-ring2",
92 "reo2host-destination-ring1",
93 "rxdma2host-monitor-destination-mac3",
94 "rxdma2host-monitor-destination-mac2",
95 "rxdma2host-monitor-destination-mac1",
96 "ppdu-end-interrupts-mac3",
97 "ppdu-end-interrupts-mac2",
98 "ppdu-end-interrupts-mac1",
99 "rxdma2host-monitor-status-ring-mac3",
100 "rxdma2host-monitor-status-ring-mac2",
101 "rxdma2host-monitor-status-ring-mac1",
102 "host2rxdma-host-buf-ring-mac3",
103 "host2rxdma-host-buf-ring-mac2",
104 "host2rxdma-host-buf-ring-mac1",
105 "rxdma2host-destination-ring-mac3",
106 "rxdma2host-destination-ring-mac2",
107 "rxdma2host-destination-ring-mac1",
108 "host2tcl-input-ring4",
109 "host2tcl-input-ring3",
110 "host2tcl-input-ring2",
111 "host2tcl-input-ring1",
112 "wbm2host-tx-completions-ring3",
113 "wbm2host-tx-completions-ring2",
114 "wbm2host-tx-completions-ring1",
115 "tcl2host-status-ring",
116};
117
Govind Singh654e9592020-08-14 10:10:23 +0300118static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
119{
120 struct ath11k_base *ab = ab_pci->ab;
121
122 u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset);
123
124 lockdep_assert_held(&ab_pci->window_lock);
125
126 if (window != ab_pci->register_window) {
127 iowrite32(WINDOW_ENABLE_BIT | window,
128 ab->mem + WINDOW_REG_ADDRESS);
Carl Huangf6fa37a2020-12-11 19:35:43 +0200129 ioread32(ab->mem + WINDOW_REG_ADDRESS);
Govind Singh654e9592020-08-14 10:10:23 +0300130 ab_pci->register_window = window;
131 }
132}
133
Carl Huangf3c603d2020-08-17 13:31:55 +0300134void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
Govind Singh654e9592020-08-14 10:10:23 +0300135{
136 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
137
Carl Huanga05bd852020-10-01 12:34:43 +0300138 /* for offset beyond BAR + 4K - 32, may
139 * need to wakeup MHI to access.
140 */
141 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
142 offset >= ACCESS_ALWAYS_OFF)
143 mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
144
Govind Singh654e9592020-08-14 10:10:23 +0300145 if (offset < WINDOW_START) {
146 iowrite32(value, ab->mem + offset);
147 } else {
148 spin_lock_bh(&ab_pci->window_lock);
149 ath11k_pci_select_window(ab_pci, offset);
150 iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
151 spin_unlock_bh(&ab_pci->window_lock);
152 }
Carl Huanga05bd852020-10-01 12:34:43 +0300153
154 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
155 offset >= ACCESS_ALWAYS_OFF)
156 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
Govind Singh654e9592020-08-14 10:10:23 +0300157}
158
Carl Huangf3c603d2020-08-17 13:31:55 +0300159u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
Govind Singh654e9592020-08-14 10:10:23 +0300160{
161 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
162 u32 val;
163
Carl Huanga05bd852020-10-01 12:34:43 +0300164 /* for offset beyond BAR + 4K - 32, may
165 * need to wakeup MHI to access.
166 */
167 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
168 offset >= ACCESS_ALWAYS_OFF)
169 mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
170
Govind Singh654e9592020-08-14 10:10:23 +0300171 if (offset < WINDOW_START) {
172 val = ioread32(ab->mem + offset);
173 } else {
174 spin_lock_bh(&ab_pci->window_lock);
175 ath11k_pci_select_window(ab_pci, offset);
176 val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
177 spin_unlock_bh(&ab_pci->window_lock);
178 }
179
Carl Huanga05bd852020-10-01 12:34:43 +0300180 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
181 offset >= ACCESS_ALWAYS_OFF)
182 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
183
Govind Singh654e9592020-08-14 10:10:23 +0300184 return val;
185}
186
Carl Huangf3c603d2020-08-17 13:31:55 +0300187static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
188{
189 u32 val, delay;
190
191 val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
192
193 val |= PCIE_SOC_GLOBAL_RESET_V;
194
195 ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
196
197 /* TODO: exact time to sleep is uncertain */
198 delay = 10;
199 mdelay(delay);
200
201 /* Need to toggle V bit back otherwise stuck in reset status */
202 val &= ~PCIE_SOC_GLOBAL_RESET_V;
203
204 ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
205
206 mdelay(delay);
207
208 val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
209 if (val == 0xffffffff)
210 ath11k_warn(ab, "link down error during global reset\n");
211}
212
213static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
214{
215 u32 val;
216
217 /* read cookie */
218 val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
219 ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val);
220
221 val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
222 ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
223
224 /* TODO: exact time to sleep is uncertain */
225 mdelay(10);
226
227 /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
228 * continuing warm path and entering dead loop.
229 */
230 ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
231 mdelay(10);
232
233 val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
234 ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
235
236 /* A read clear register. clear the register to prevent
237 * Q6 from entering wrong code path.
238 */
239 val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
240 ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val);
241}
242
Carl Huang06999402020-12-10 16:05:22 +0200243static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
244 u32 offset, u32 value, u32 mask)
245{
246 u32 v;
247 int i;
248
249 v = ath11k_pci_read32(ab, offset);
250 if ((v & mask) == value)
251 return 0;
252
253 for (i = 0; i < 10; i++) {
254 ath11k_pci_write32(ab, offset, (v & ~mask) | value);
255
256 v = ath11k_pci_read32(ab, offset);
257 if ((v & mask) == value)
258 return 0;
259
260 mdelay(2);
261 }
262
263 ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
264 offset, v & mask, value);
265
266 return -ETIMEDOUT;
267}
268
269static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
270{
271 int ret;
272
273 ret = ath11k_pci_set_link_reg(ab,
274 PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
275 PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
276 PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
277 if (!ret) {
278 ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
279 return ret;
280 }
281
282 ret = ath11k_pci_set_link_reg(ab,
283 PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
284 PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
285 PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
286 if (!ret) {
287 ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
288 return ret;
289 }
290
291 ret = ath11k_pci_set_link_reg(ab,
292 PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
293 PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
294 PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
295 if (!ret) {
296 ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
297 return ret;
298 }
299
300 ret = ath11k_pci_set_link_reg(ab,
301 PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
302 PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
303 PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
304 if (!ret) {
305 ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
306 return ret;
307 }
308
309 return 0;
310}
311
Carl Huangbabb0ce2020-12-10 16:05:21 +0200312static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
313{
314 u32 val;
315 int i;
316
317 val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
318
319 /* PCIE link seems very unstable after the Hot Reset*/
320 for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
321 if (val == 0xffffffff)
322 mdelay(5);
323
324 ath11k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
325 val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
326 }
327
328 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val);
329
330 val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
Kalle Valo562934a2020-12-16 20:24:11 +0200331 val |= GCC_GCC_PCIE_HOT_RST_VAL;
Carl Huangbabb0ce2020-12-10 16:05:21 +0200332 ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
333 val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
334
335 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
336
337 mdelay(5);
338}
339
340static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
341{
342 /* This is a WAR for PCIE Hotreset.
343 * When target receive Hotreset, but will set the interrupt.
344 * So when download SBL again, SBL will open Interrupt and
345 * receive it, and crash immediately.
346 */
347 ath11k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
348}
349
Carl Huang0ccdf432020-12-10 16:05:23 +0200350static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
351{
352 u32 val;
353
354 val = ath11k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
355 val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
356 ath11k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
357}
358
Carl Huangf3c603d2020-08-17 13:31:55 +0300359static void ath11k_pci_force_wake(struct ath11k_base *ab)
360{
361 ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
362 mdelay(5);
363}
364
Carl Huangbabb0ce2020-12-10 16:05:21 +0200365static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
Carl Huangf3c603d2020-08-17 13:31:55 +0300366{
Carl Huangbabb0ce2020-12-10 16:05:21 +0200367 if (power_on) {
368 ath11k_pci_enable_ltssm(ab);
369 ath11k_pci_clear_all_intrs(ab);
Carl Huang0ccdf432020-12-10 16:05:23 +0200370 ath11k_pci_set_wlaon_pwr_ctrl(ab);
Carl Huang06999402020-12-10 16:05:22 +0200371 ath11k_pci_fix_l1ss(ab);
Carl Huangbabb0ce2020-12-10 16:05:21 +0200372 }
373
Carl Huangf3c603d2020-08-17 13:31:55 +0300374 ath11k_mhi_clear_vector(ab);
375 ath11k_pci_soc_global_reset(ab);
376 ath11k_mhi_set_mhictrl_reset(ab);
377 ath11k_pci_clear_dbg_registers(ab);
378}
379
/* Translate an MSI vector index into the Linux irq number. */
int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
    return pci_irq_vector(to_pci_dev(dev), vector);
}
386
Govind Singhc4eacab2020-08-14 10:10:24 +0300387static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
388 u32 *msi_addr_hi)
389{
Anilkumar Kollie8e55d82020-12-07 16:16:04 +0200390 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
Govind Singhc4eacab2020-08-14 10:10:24 +0300391 struct pci_dev *pci_dev = to_pci_dev(ab->dev);
392
393 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
394 msi_addr_lo);
395
Anilkumar Kollie8e55d82020-12-07 16:16:04 +0200396 if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
397 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
398 msi_addr_hi);
399 } else {
400 *msi_addr_hi = 0;
401 }
Govind Singhc4eacab2020-08-14 10:10:24 +0300402}
403
Govind Singh1399fb82020-08-13 12:04:24 +0300404int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name,
405 int *num_vectors, u32 *user_base_data,
406 u32 *base_vector)
407{
408 struct ath11k_base *ab = ab_pci->ab;
409 int idx;
410
411 for (idx = 0; idx < msi_config.total_users; idx++) {
412 if (strcmp(user_name, msi_config.users[idx].name) == 0) {
413 *num_vectors = msi_config.users[idx].num_vectors;
414 *user_base_data = msi_config.users[idx].base_vector
415 + ab_pci->msi_ep_base_data;
416 *base_vector = msi_config.users[idx].base_vector;
417
418 ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
419 user_name, *num_vectors, *user_base_data,
420 *base_vector);
421
422 return 0;
423 }
424 }
425
426 ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
427
428 return -EINVAL;
429}
430
Govind Singhc4eacab2020-08-14 10:10:24 +0300431static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
432 int *num_vectors, u32 *user_base_data,
433 u32 *base_vector)
434{
435 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
436
437 return ath11k_pci_get_user_msi_assignment(ab_pci, user_name,
438 num_vectors, user_base_data,
439 base_vector);
440}
441
Carl Huangd4ecb902020-08-17 13:31:52 +0300442static void ath11k_pci_free_ext_irq(struct ath11k_base *ab)
443{
444 int i, j;
445
446 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
447 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
448
449 for (j = 0; j < irq_grp->num_irq; j++)
450 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
451
452 netif_napi_del(&irq_grp->napi);
453 }
454}
455
Govind Singh7f4beda2020-08-13 12:04:25 +0300456static void ath11k_pci_free_irq(struct ath11k_base *ab)
457{
458 int i, irq_idx;
459
Kalle Valod9d4b5f2020-08-17 13:31:48 +0300460 for (i = 0; i < ab->hw_params.ce_count; i++) {
Carl Huange3396b82020-08-17 13:31:47 +0300461 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
Govind Singh7f4beda2020-08-13 12:04:25 +0300462 continue;
463 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
464 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
465 }
Carl Huangd4ecb902020-08-17 13:31:52 +0300466
467 ath11k_pci_free_ext_irq(ab);
Govind Singh7f4beda2020-08-13 12:04:25 +0300468}
469
Govind Singh2c3960c2020-08-14 10:10:25 +0300470static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
471{
472 u32 irq_idx;
473
474 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
475 enable_irq(ab->irq_num[irq_idx]);
476}
477
Govind Singh7f4beda2020-08-13 12:04:25 +0300478static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
479{
480 u32 irq_idx;
481
482 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
483 disable_irq_nosync(ab->irq_num[irq_idx]);
484}
485
Govind Singh2c3960c2020-08-14 10:10:25 +0300486static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
487{
488 int i;
489
Kalle Valod9d4b5f2020-08-17 13:31:48 +0300490 for (i = 0; i < ab->hw_params.ce_count; i++) {
Carl Huange3396b82020-08-17 13:31:47 +0300491 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
Govind Singh2c3960c2020-08-14 10:10:25 +0300492 continue;
493 ath11k_pci_ce_irq_disable(ab, i);
494 }
495}
496
497static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
498{
499 int i;
500 int irq_idx;
501
Kalle Valod9d4b5f2020-08-17 13:31:48 +0300502 for (i = 0; i < ab->hw_params.ce_count; i++) {
Carl Huange3396b82020-08-17 13:31:47 +0300503 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
Govind Singh2c3960c2020-08-14 10:10:25 +0300504 continue;
505
506 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
507 synchronize_irq(ab->irq_num[irq_idx]);
508 }
509}
510
Allen Pais0f01dcb2020-10-07 16:03:09 +0530511static void ath11k_pci_ce_tasklet(struct tasklet_struct *t)
Govind Singh2c3960c2020-08-14 10:10:25 +0300512{
Allen Pais0f01dcb2020-10-07 16:03:09 +0530513 struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
Govind Singh2c3960c2020-08-14 10:10:25 +0300514
515 ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
516
517 ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
518}
519
Govind Singh7f4beda2020-08-13 12:04:25 +0300520static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
521{
522 struct ath11k_ce_pipe *ce_pipe = arg;
523
524 ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
Govind Singh2c3960c2020-08-14 10:10:25 +0300525 tasklet_schedule(&ce_pipe->intr_tq);
Govind Singh7f4beda2020-08-13 12:04:25 +0300526
527 return IRQ_HANDLED;
528}
529
Carl Huangd4ecb902020-08-17 13:31:52 +0300530static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
531{
532 int i;
533
534 for (i = 0; i < irq_grp->num_irq; i++)
535 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
536}
537
538static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
539{
540 int i;
541
542 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
543 struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
544
545 ath11k_pci_ext_grp_disable(irq_grp);
546
547 napi_synchronize(&irq_grp->napi);
548 napi_disable(&irq_grp->napi);
549 }
550}
551
552static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
553{
554 int i;
555
556 for (i = 0; i < irq_grp->num_irq; i++)
557 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
558}
559
560static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
561{
562 int i;
563
564 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
565 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
566
567 napi_enable(&irq_grp->napi);
568 ath11k_pci_ext_grp_enable(irq_grp);
569 }
570}
571
572static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab)
573{
574 int i, j, irq_idx;
575
576 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
577 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
578
579 for (j = 0; j < irq_grp->num_irq; j++) {
580 irq_idx = irq_grp->irqs[j];
581 synchronize_irq(ab->irq_num[irq_idx]);
582 }
583 }
584}
585
/* Fully quiesce DP interrupts: mask them, then wait out any handlers. */
static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab)
{
    __ath11k_pci_ext_irq_disable(ab);
    ath11k_pci_sync_ext_irqs(ab);
}
591
592static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
593{
594 struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
595 struct ath11k_ext_irq_grp,
596 napi);
597 struct ath11k_base *ab = irq_grp->ab;
598 int work_done;
599
600 work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
601 if (work_done < budget) {
602 napi_complete_done(napi, work_done);
603 ath11k_pci_ext_grp_enable(irq_grp);
604 }
605
606 if (work_done > budget)
607 work_done = budget;
608
609 return work_done;
610}
611
612static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
613{
614 struct ath11k_ext_irq_grp *irq_grp = arg;
615
616 ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
617
618 ath11k_pci_ext_grp_disable(irq_grp);
619
620 napi_schedule(&irq_grp->napi);
621
622 return IRQ_HANDLED;
623}
624
625static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
626{
627 int i, j, ret, num_vectors = 0;
628 u32 user_base_data = 0, base_vector = 0;
629
Colin Ian Kingb2c09452020-08-19 12:14:52 +0100630 ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
631 &num_vectors,
632 &user_base_data,
633 &base_vector);
634 if (ret < 0)
635 return ret;
Carl Huangd4ecb902020-08-17 13:31:52 +0300636
637 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
638 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
639 u32 num_irq = 0;
640
641 irq_grp->ab = ab;
642 irq_grp->grp_id = i;
643 init_dummy_netdev(&irq_grp->napi_ndev);
644 netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
645 ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
646
647 if (ab->hw_params.ring_mask->tx[i] ||
648 ab->hw_params.ring_mask->rx[i] ||
649 ab->hw_params.ring_mask->rx_err[i] ||
650 ab->hw_params.ring_mask->rx_wbm_rel[i] ||
651 ab->hw_params.ring_mask->reo_status[i] ||
652 ab->hw_params.ring_mask->rxdma2host[i] ||
653 ab->hw_params.ring_mask->host2rxdma[i] ||
654 ab->hw_params.ring_mask->rx_mon_status[i]) {
655 num_irq = 1;
656 }
657
658 irq_grp->num_irq = num_irq;
659 irq_grp->irqs[0] = base_vector + i;
660
661 for (j = 0; j < irq_grp->num_irq; j++) {
662 int irq_idx = irq_grp->irqs[j];
663 int vector = (i % num_vectors) + base_vector;
664 int irq = ath11k_pci_get_msi_irq(ab->dev, vector);
665
666 ab->irq_num[irq_idx] = irq;
667
668 ath11k_dbg(ab, ATH11K_DBG_PCI,
669 "irq:%d group:%d\n", irq, i);
670 ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
671 IRQF_SHARED,
672 "DP_EXT_IRQ", irq_grp);
673 if (ret) {
674 ath11k_err(ab, "failed request irq %d: %d\n",
675 vector, ret);
676 return ret;
677 }
678
679 disable_irq_nosync(ab->irq_num[irq_idx]);
680 }
681 }
682
683 return 0;
684}
685
Govind Singh7f4beda2020-08-13 12:04:25 +0300686static int ath11k_pci_config_irq(struct ath11k_base *ab)
687{
688 struct ath11k_ce_pipe *ce_pipe;
689 u32 msi_data_start;
690 u32 msi_data_count;
691 u32 msi_irq_start;
692 unsigned int msi_data;
693 int irq, i, ret, irq_idx;
694
695 ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab),
696 "CE", &msi_data_count,
697 &msi_data_start, &msi_irq_start);
698 if (ret)
699 return ret;
700
701 /* Configure CE irqs */
Kalle Valod9d4b5f2020-08-17 13:31:48 +0300702 for (i = 0; i < ab->hw_params.ce_count; i++) {
Govind Singh7f4beda2020-08-13 12:04:25 +0300703 msi_data = (i % msi_data_count) + msi_irq_start;
704 irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
705 ce_pipe = &ab->ce.ce_pipe[i];
706
Carl Huange3396b82020-08-17 13:31:47 +0300707 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
Govind Singh7f4beda2020-08-13 12:04:25 +0300708 continue;
709
710 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
711
Allen Pais0f01dcb2020-10-07 16:03:09 +0530712 tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);
Govind Singh2c3960c2020-08-14 10:10:25 +0300713
Govind Singh7f4beda2020-08-13 12:04:25 +0300714 ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
715 IRQF_SHARED, irq_name[irq_idx],
716 ce_pipe);
717 if (ret) {
718 ath11k_err(ab, "failed to request irq %d: %d\n",
719 irq_idx, ret);
720 return ret;
721 }
722
723 ab->irq_num[irq_idx] = irq;
Carl Huange5c860e2020-08-17 13:31:50 +0300724 ath11k_pci_ce_irq_disable(ab, i);
Govind Singh7f4beda2020-08-13 12:04:25 +0300725 }
726
Carl Huangd4ecb902020-08-17 13:31:52 +0300727 ret = ath11k_pci_ext_irq_config(ab);
728 if (ret)
729 return ret;
730
Govind Singh7f4beda2020-08-13 12:04:25 +0300731 return 0;
732}
733
734static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
735{
736 struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
737
Anilkumar Kolli967c1d12020-09-08 07:55:35 +0000738 cfg->tgt_ce = ab->hw_params.target_ce_config;
739 cfg->tgt_ce_len = ab->hw_params.target_ce_count;
Govind Singh7f4beda2020-08-13 12:04:25 +0300740
Anilkumar Kolli967c1d12020-09-08 07:55:35 +0000741 cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
742 cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
Govind Singheb8de042020-08-14 10:10:22 +0300743 ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390;
Carl Huange838c142020-10-01 12:34:44 +0300744
745 ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
746 &cfg->shadow_reg_v2_len);
Govind Singh7f4beda2020-08-13 12:04:25 +0300747}
748
Govind Singh7f4beda2020-08-13 12:04:25 +0300749static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
750{
751 int i;
752
Kalle Valod9d4b5f2020-08-17 13:31:48 +0300753 for (i = 0; i < ab->hw_params.ce_count; i++) {
Carl Huange3396b82020-08-17 13:31:47 +0300754 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
Govind Singh7f4beda2020-08-13 12:04:25 +0300755 continue;
756 ath11k_pci_ce_irq_enable(ab, i);
757 }
758}
759
Govind Singh5697a562020-08-13 12:04:22 +0300760static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci)
761{
762 struct ath11k_base *ab = ab_pci->ab;
763 struct msi_desc *msi_desc;
764 int num_vectors;
765 int ret;
766
767 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
768 msi_config.total_vectors,
769 msi_config.total_vectors,
770 PCI_IRQ_MSI);
771 if (num_vectors != msi_config.total_vectors) {
772 ath11k_err(ab, "failed to get %d MSI vectors, only %d available",
773 msi_config.total_vectors, num_vectors);
774
775 if (num_vectors >= 0)
776 return -EINVAL;
777 else
778 return num_vectors;
779 }
780
781 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
782 if (!msi_desc) {
783 ath11k_err(ab, "msi_desc is NULL!\n");
784 ret = -EINVAL;
785 goto free_msi_vector;
786 }
787
788 ab_pci->msi_ep_base_data = msi_desc->msg.data;
Anilkumar Kollie8e55d82020-12-07 16:16:04 +0200789 if (msi_desc->msi_attrib.is_64)
790 set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
Govind Singh5697a562020-08-13 12:04:22 +0300791
792 ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
793
794 return 0;
795
796free_msi_vector:
797 pci_free_irq_vectors(ab_pci->pdev);
798
799 return ret;
800}
801
802static void ath11k_pci_disable_msi(struct ath11k_pci *ab_pci)
803{
804 pci_free_irq_vectors(ab_pci->pdev);
805}
806
Govind Singh57626132020-08-13 12:04:21 +0300807static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
808{
809 struct ath11k_base *ab = ab_pci->ab;
810 u16 device_id;
811 int ret = 0;
812
813 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
814 if (device_id != ab_pci->dev_id) {
815 ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
816 device_id, ab_pci->dev_id);
817 ret = -EIO;
818 goto out;
819 }
820
821 ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
822 if (ret) {
823 ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
824 goto out;
825 }
826
827 ret = pci_enable_device(pdev);
828 if (ret) {
829 ath11k_err(ab, "failed to enable pci device: %d\n", ret);
830 goto out;
831 }
832
833 ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
834 if (ret) {
835 ath11k_err(ab, "failed to request pci region: %d\n", ret);
836 goto disable_device;
837 }
838
839 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
840 if (ret) {
841 ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
842 ATH11K_PCI_DMA_MASK, ret);
843 goto release_region;
844 }
845
846 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
847 if (ret) {
848 ath11k_err(ab, "failed to set pci consistent dma mask to %d: %d\n",
849 ATH11K_PCI_DMA_MASK, ret);
850 goto release_region;
851 }
852
853 pci_set_master(pdev);
854
855 ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
856 ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
857 if (!ab->mem) {
858 ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
859 ret = -EIO;
860 goto clear_master;
861 }
862
863 ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
864 return 0;
865
866clear_master:
867 pci_clear_master(pdev);
868release_region:
869 pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
870disable_device:
871 pci_disable_device(pdev);
872out:
873 return ret;
874}
875
876static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
877{
878 struct ath11k_base *ab = ab_pci->ab;
879 struct pci_dev *pci_dev = ab_pci->pdev;
880
881 pci_iounmap(pci_dev, ab->mem);
882 ab->mem = NULL;
883 pci_clear_master(pci_dev);
884 pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
885 if (pci_is_enabled(pci_dev))
886 pci_disable_device(pci_dev);
887}
888
Govind Singh1399fb82020-08-13 12:04:24 +0300889static int ath11k_pci_power_up(struct ath11k_base *ab)
890{
891 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
892 int ret;
893
Carl Huanga05bd852020-10-01 12:34:43 +0300894 ab_pci->register_window = 0;
895 clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
Carl Huangbabb0ce2020-12-10 16:05:21 +0200896 ath11k_pci_sw_reset(ab_pci->ab, true);
Carl Huangf3c603d2020-08-17 13:31:55 +0300897
Govind Singh1399fb82020-08-13 12:04:24 +0300898 ret = ath11k_mhi_start(ab_pci);
899 if (ret) {
900 ath11k_err(ab, "failed to start mhi: %d\n", ret);
901 return ret;
902 }
903
904 return 0;
905}
906
907static void ath11k_pci_power_down(struct ath11k_base *ab)
908{
909 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
910
Carl Huangbabb0ce2020-12-10 16:05:21 +0200911 ath11k_pci_force_wake(ab_pci->ab);
Govind Singh1399fb82020-08-13 12:04:24 +0300912 ath11k_mhi_stop(ab_pci);
Carl Huanga05bd852020-10-01 12:34:43 +0300913 clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
Carl Huangbabb0ce2020-12-10 16:05:21 +0200914 ath11k_pci_sw_reset(ab_pci->ab, false);
Govind Singh1399fb82020-08-13 12:04:24 +0300915}
916
/* hif suspend: forward to the MHI layer. */
static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
{
    ath11k_mhi_suspend(ath11k_pci_priv(ab));

    return 0;
}
925
/* hif resume: forward to the MHI layer. */
static int ath11k_pci_hif_resume(struct ath11k_base *ab)
{
    ath11k_mhi_resume(ath11k_pci_priv(ab));

    return 0;
}
934
Govind Singh2c3960c2020-08-14 10:10:25 +0300935static void ath11k_pci_kill_tasklets(struct ath11k_base *ab)
936{
937 int i;
938
Kalle Valod9d4b5f2020-08-17 13:31:48 +0300939 for (i = 0; i < ab->hw_params.ce_count; i++) {
Govind Singh2c3960c2020-08-14 10:10:25 +0300940 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
941
Carl Huange3396b82020-08-17 13:31:47 +0300942 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
Govind Singh2c3960c2020-08-14 10:10:25 +0300943 continue;
944
945 tasklet_kill(&ce_pipe->intr_tq);
946 }
947}
948
/* Fully quiesce CE interrupts: mask, wait out handlers, kill tasklets. */
static void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab)
{
    ath11k_pci_ce_irqs_disable(ab);
    ath11k_pci_sync_ce_irqs(ab);
    ath11k_pci_kill_tasklets(ab);
}
955
/* hif stop: quiesce CE interrupts and tear down the CE pipes. */
static void ath11k_pci_stop(struct ath11k_base *ab)
{
    ath11k_pci_ce_irq_disable_sync(ab);
    ath11k_ce_cleanup_pipes(ab);
}
961
/* HIF .start callback: mark init done, enable CE interrupts and post
 * initial RX buffers to the CE rings. Always returns 0.
 */
static int ath11k_pci_start(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	/* INIT_DONE gates behavior elsewhere in the driver (e.g. the
	 * forced-wakeup path hinted at by ACCESS_ALWAYS_OFF above) —
	 * set it before interrupts start firing.
	 */
	set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);

	ath11k_pci_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
973
/* HIF .ce_irq_enable callback: thin wrapper around the CE IRQ enable path. */
static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
{
	ath11k_pci_ce_irqs_enable(ab);
}
978
/* HIF .ce_irq_disable callback: disable CE IRQs and synchronously drain
 * outstanding handlers/tasklets via ath11k_pci_ce_irq_disable_sync().
 */
static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
{
	ath11k_pci_ce_irq_disable_sync(ab);
}
983
Govind Singh2c3960c2020-08-14 10:10:25 +0300984static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
985 u8 *ul_pipe, u8 *dl_pipe)
986{
987 const struct service_to_pipe *entry;
988 bool ul_set = false, dl_set = false;
989 int i;
990
Anilkumar Kolli967c1d12020-09-08 07:55:35 +0000991 for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
992 entry = &ab->hw_params.svc_to_ce_map[i];
Govind Singh2c3960c2020-08-14 10:10:25 +0300993
994 if (__le32_to_cpu(entry->service_id) != service_id)
995 continue;
996
997 switch (__le32_to_cpu(entry->pipedir)) {
998 case PIPEDIR_NONE:
999 break;
1000 case PIPEDIR_IN:
1001 WARN_ON(dl_set);
1002 *dl_pipe = __le32_to_cpu(entry->pipenum);
1003 dl_set = true;
1004 break;
1005 case PIPEDIR_OUT:
1006 WARN_ON(ul_set);
1007 *ul_pipe = __le32_to_cpu(entry->pipenum);
1008 ul_set = true;
1009 break;
1010 case PIPEDIR_INOUT:
1011 WARN_ON(dl_set);
1012 WARN_ON(ul_set);
1013 *dl_pipe = __le32_to_cpu(entry->pipenum);
1014 *ul_pipe = __le32_to_cpu(entry->pipenum);
1015 dl_set = true;
1016 ul_set = true;
1017 break;
1018 }
1019 }
1020
1021 if (WARN_ON(!ul_set || !dl_set))
1022 return -ENOENT;
Govind Singh7f4beda2020-08-13 12:04:25 +03001023
1024 return 0;
1025}
1026
/* HIF (host interface) operation table binding the bus-agnostic ath11k
 * core to this PCI implementation. MSI helpers come from pci.h
 * (ath11k_pci_get_msi_address / ath11k_get_user_msi_assignment).
 */
static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
	.start = ath11k_pci_start,
	.stop = ath11k_pci_stop,
	.read32 = ath11k_pci_read32,
	.write32 = ath11k_pci_write32,
	.power_down = ath11k_pci_power_down,
	.power_up = ath11k_pci_power_up,
	.suspend = ath11k_pci_hif_suspend,
	.resume = ath11k_pci_hif_resume,
	.irq_enable = ath11k_pci_ext_irq_enable,
	.irq_disable = ath11k_pci_ext_irq_disable,
	.get_msi_address = ath11k_pci_get_msi_address,
	.get_user_msi_vector = ath11k_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pci_map_service_to_pipe,
	.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
};
1044
Govind Singh6e0355a2020-08-13 12:04:20 +03001045static int ath11k_pci_probe(struct pci_dev *pdev,
1046 const struct pci_device_id *pci_dev)
1047{
1048 struct ath11k_base *ab;
Govind Singh57626132020-08-13 12:04:21 +03001049 struct ath11k_pci *ab_pci;
Kalle Valo18ac1662020-09-29 20:15:32 +03001050 u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
Govind Singh57626132020-08-13 12:04:21 +03001051 int ret;
Govind Singh6e0355a2020-08-13 12:04:20 +03001052
1053 dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n");
1054
Govind Singh1ff8ed72020-08-13 12:04:26 +03001055 ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
1056 &ath11k_pci_bus_params);
Govind Singh6e0355a2020-08-13 12:04:20 +03001057 if (!ab) {
1058 dev_err(&pdev->dev, "failed to allocate ath11k base\n");
1059 return -ENOMEM;
1060 }
1061
1062 ab->dev = &pdev->dev;
Govind Singh6e0355a2020-08-13 12:04:20 +03001063 pci_set_drvdata(pdev, ab);
Govind Singh57626132020-08-13 12:04:21 +03001064 ab_pci = ath11k_pci_priv(ab);
1065 ab_pci->dev_id = pci_dev->device;
1066 ab_pci->ab = ab;
Govind Singh5697a562020-08-13 12:04:22 +03001067 ab_pci->pdev = pdev;
Govind Singh7f4beda2020-08-13 12:04:25 +03001068 ab->hif.ops = &ath11k_pci_hif_ops;
Govind Singh57626132020-08-13 12:04:21 +03001069 pci_set_drvdata(pdev, ab);
Govind Singh654e9592020-08-14 10:10:23 +03001070 spin_lock_init(&ab_pci->window_lock);
Govind Singh57626132020-08-13 12:04:21 +03001071
1072 ret = ath11k_pci_claim(ab_pci, pdev);
1073 if (ret) {
1074 ath11k_err(ab, "failed to claim device: %d\n", ret);
1075 goto err_free_core;
1076 }
Govind Singh6e0355a2020-08-13 12:04:20 +03001077
Kalle Valo18ac1662020-09-29 20:15:32 +03001078 switch (pci_dev->device) {
1079 case QCA6390_DEVICE_ID:
1080 soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
1081 soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
1082 soc_hw_version);
1083 soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
1084 soc_hw_version);
1085
1086 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
1087 soc_hw_version_major, soc_hw_version_minor);
1088
1089 switch (soc_hw_version_major) {
1090 case 2:
1091 ab->hw_rev = ATH11K_HW_QCA6390_HW20;
1092 break;
1093 default:
1094 dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
1095 soc_hw_version_major, soc_hw_version_minor);
1096 ret = -EOPNOTSUPP;
1097 goto err_pci_free_region;
1098 }
1099 break;
1100 default:
1101 dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
1102 pci_dev->device);
1103 ret = -EOPNOTSUPP;
1104 goto err_pci_free_region;
1105 }
1106
Govind Singh5697a562020-08-13 12:04:22 +03001107 ret = ath11k_pci_enable_msi(ab_pci);
1108 if (ret) {
1109 ath11k_err(ab, "failed to enable msi: %d\n", ret);
1110 goto err_pci_free_region;
1111 }
1112
Kalle Valob8246f82020-08-13 12:04:23 +03001113 ret = ath11k_core_pre_init(ab);
1114 if (ret)
1115 goto err_pci_disable_msi;
1116
Govind Singh1399fb82020-08-13 12:04:24 +03001117 ret = ath11k_mhi_register(ab_pci);
1118 if (ret) {
1119 ath11k_err(ab, "failed to register mhi: %d\n", ret);
1120 goto err_pci_disable_msi;
1121 }
1122
Govind Singh7f4beda2020-08-13 12:04:25 +03001123 ret = ath11k_hal_srng_init(ab);
1124 if (ret)
1125 goto err_mhi_unregister;
1126
1127 ret = ath11k_ce_alloc_pipes(ab);
1128 if (ret) {
1129 ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1130 goto err_hal_srng_deinit;
1131 }
1132
1133 ath11k_pci_init_qmi_ce_config(ab);
1134
1135 ret = ath11k_pci_config_irq(ab);
1136 if (ret) {
1137 ath11k_err(ab, "failed to config irq: %d\n", ret);
1138 goto err_ce_free;
1139 }
1140
1141 ret = ath11k_core_init(ab);
1142 if (ret) {
1143 ath11k_err(ab, "failed to init core: %d\n", ret);
1144 goto err_free_irq;
1145 }
Govind Singh6e0355a2020-08-13 12:04:20 +03001146 return 0;
Govind Singh57626132020-08-13 12:04:21 +03001147
Govind Singh7f4beda2020-08-13 12:04:25 +03001148err_free_irq:
1149 ath11k_pci_free_irq(ab);
1150
1151err_ce_free:
1152 ath11k_ce_free_pipes(ab);
1153
1154err_hal_srng_deinit:
1155 ath11k_hal_srng_deinit(ab);
1156
1157err_mhi_unregister:
1158 ath11k_mhi_unregister(ab_pci);
1159
Kalle Valob8246f82020-08-13 12:04:23 +03001160err_pci_disable_msi:
1161 ath11k_pci_disable_msi(ab_pci);
1162
Govind Singh5697a562020-08-13 12:04:22 +03001163err_pci_free_region:
1164 ath11k_pci_free_region(ab_pci);
1165
Govind Singh57626132020-08-13 12:04:21 +03001166err_free_core:
1167 ath11k_core_free(ab);
Govind Singh5697a562020-08-13 12:04:22 +03001168
Govind Singh57626132020-08-13 12:04:21 +03001169 return ret;
Govind Singh6e0355a2020-08-13 12:04:20 +03001170}
1171
/* PCI remove: tear down in reverse order of probe.
 *
 * If QMI setup had failed (ATH11K_FLAG_QMI_FAIL), the core was never
 * fully initialized, so we power down, destroy debugfs and the QMI
 * service directly and skip ath11k_core_deinit() via the qmi_fail label.
 */
static void ath11k_pci_remove(struct pci_dev *pdev)
{
	struct ath11k_base *ab = pci_get_drvdata(pdev);
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_pci_power_down(ab);
		ath11k_debugfs_soc_destroy(ab);
		ath11k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	/* Signal in-progress operations that we are unregistering before
	 * deinitializing the core.
	 */
	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);

	ath11k_core_deinit(ab);

qmi_fail:
	/* Common teardown: MHI, IRQ/MSI, PCI regions, SRNG, CE, core. */
	ath11k_mhi_unregister(ab_pci);

	ath11k_pci_free_irq(ab);
	ath11k_pci_disable_msi(ab_pci);
	ath11k_pci_free_region(ab_pci);

	ath11k_hal_srng_deinit(ab);
	ath11k_ce_free_pipes(ab);
	ath11k_core_free(ab);
}
1199
/* PCI shutdown callback: power the device down cleanly on system
 * shutdown/reboot; no resource teardown is done here.
 */
static void ath11k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath11k_base *ab = pci_get_drvdata(pdev);

	ath11k_pci_power_down(ab);
}
1206
Carl Huangd1b0c3382020-12-11 19:35:50 +02001207static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
1208{
1209 struct ath11k_base *ab = dev_get_drvdata(dev);
1210 int ret;
1211
1212 ret = ath11k_core_suspend(ab);
1213 if (ret)
1214 ath11k_warn(ab, "failed to suspend core: %d\n", ret);
1215
1216 return ret;
1217}
1218
1219static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
1220{
1221 struct ath11k_base *ab = dev_get_drvdata(dev);
1222 int ret;
1223
1224 ret = ath11k_core_resume(ab);
1225 if (ret)
1226 ath11k_warn(ab, "failed to resume core: %d\n", ret);
1227
1228 return ret;
1229}
1230
/* System sleep PM ops (suspend/resume only; no runtime PM). */
static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
			 ath11k_pci_pm_suspend,
			 ath11k_pci_pm_resume);
1234
/* PCI driver descriptor; PM ops are only wired up when CONFIG_PM is set. */
static struct pci_driver ath11k_pci_driver = {
	.name = "ath11k_pci",
	.id_table = ath11k_pci_id_table,
	.probe = ath11k_pci_probe,
	.remove = ath11k_pci_remove,
	.shutdown = ath11k_pci_shutdown,
#ifdef CONFIG_PM
	.driver.pm = &ath11k_pci_pm_ops,
#endif
};
1245
1246static int ath11k_pci_init(void)
1247{
1248 int ret;
1249
1250 ret = pci_register_driver(&ath11k_pci_driver);
1251 if (ret)
1252 pr_err("failed to register ath11k pci driver: %d\n",
1253 ret);
1254
1255 return ret;
1256}
1257module_init(ath11k_pci_init);
1258
/* Module exit: unregister the PCI driver. */
static void ath11k_pci_exit(void)
{
	pci_unregister_driver(&ath11k_pci_driver);
}

module_exit(ath11k_pci_exit);
1265
MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA639x 2.0 firmware files — lets userspace tooling (e.g. initramfs
 * generators) know which firmware blobs this module may request.
 */
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_BOARD_API2_FILE);
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_AMSS_FILE);
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_M3_FILE);