/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
};

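/*
 * Note on endianness: unlike host_ce_config_wlan above, this table is not
 * interpreted by the host. ath10k_pci_init_config() copies it verbatim into
 * target memory via ath10k_pci_diag_write_mem(), so every field is
 * pre-swapped to the target's little-endian layout with __cpu_to_le32().
 * A host-side reader must swap back, e.g. (illustrative):
 *
 *	u32 wmi_rx_max = __le32_to_cpu(target_ce_config_wlan[2].nbytes_max);
 */
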
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

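/*
 * Each entry above is a {service_id, pipedir, pipenum} triplet, stored
 * little-endian for the same reason as target_ce_config_wlan: the table is
 * pushed into target memory as-is by ath10k_pci_init_config(). The host-side
 * consumer is ath10k_pci_hif_map_service_to_pipe() further below, which
 * walks the table to resolve a service into its UL/DL pipe numbers.
 */
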
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise the interrupt
	 * cannot really be cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs > 1)
		return "msi-x";
	else if (ar_pci->num_msi_intrs == 1)
		return "msi";
	else
		return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	lockdep_assert_held(&ar_pci->ce_lock);

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_CB(skb)->paddr = paddr;

	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	if (ret) {
		ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	lockdep_assert_held(&ar_pci->ce_lock);

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	while (num--) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
	}
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	__ath10k_pci_rx_post_pipe(pipe);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	spin_lock_bh(&ar_pci->ce_lock);
	for (i = 0; i < CE_COUNT; i++)
		__ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}

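/*
 * The rx_post_retry timer that lands in ath10k_pci_rx_replenish_retry() is
 * armed by __ath10k_pci_rx_post_pipe() whenever a buffer post fails. Its
 * registration happens at probe time and is not part of this excerpt; a
 * sketch of what that setup looks like (assumed, matching the
 * unsigned-long-cast callback signature above):
 *
 *	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
 *		    (unsigned long)ar);
 */
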
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0)
		memcpy(data, data_buf, orig_nbytes);
	else
		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
			    address, ret);

	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len);

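/*
 * Usage sketch for the diag helpers above (illustrative; it mirrors the
 * real call in ath10k_pci_dump_registers() below). The macro deliberately
 * wraps HI_ITEM() so callers can name the host_interest struct member
 * directly:
 *
 *	__le32 vals[REG_DUMP_COUNT_QCA988X];
 *
 *	ret = ath10k_pci_diag_read_hi(ar, &vals[0], hi_failure_state,
 *				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
 */
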
Kalle Valo5e3dd152013-06-12 20:52:10 +0300657/* Read 4-byte aligned data from Target memory or register */
658static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
659 u32 *data)
660{
661 /* Assume range doesn't cross this boundary */
662 if (address >= DRAM_BASE_ADDRESS)
Kalle Valo3d29a3e2014-08-25 08:37:26 +0300663 return ath10k_pci_diag_read32(ar, address, data);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300664
Kalle Valo5e3dd152013-06-12 20:52:10 +0300665 *data = ath10k_pci_read32(ar, address);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300666 return 0;
667}
668
669static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
670 const void *data, int nbytes)
671{
672 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
673 int ret = 0;
674 u32 buf;
675 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
676 unsigned int id;
677 unsigned int flags;
Michal Kazior2aa39112013-08-27 13:08:02 +0200678 struct ath10k_ce_pipe *ce_diag;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300679 void *data_buf = NULL;
680 u32 ce_data; /* Host buffer address in CE space */
681 dma_addr_t ce_data_base = 0;
682 int i;
683
684 ce_diag = ar_pci->ce_diag;
685
686 /*
687 * Allocate a temporary bounce buffer to hold caller's data
688 * to be DMA'ed to Target. This guarantees
689 * 1) 4-byte alignment
690 * 2) Buffer in DMA-able space
691 */
692 orig_nbytes = nbytes;
Michal Kazior68c03242014-03-28 10:02:35 +0200693 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
694 orig_nbytes,
695 &ce_data_base,
696 GFP_ATOMIC);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300697 if (!data_buf) {
698 ret = -ENOMEM;
699 goto done;
700 }
701
702 /* Copy caller's data to allocated DMA buf */
Michal Kazior0fdc14e42014-08-26 19:14:03 +0300703 memcpy(data_buf, data, orig_nbytes);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300704
705 /*
706 * The address supplied by the caller is in the
707 * Target CPU virtual address space.
708 *
709 * In order to use this address with the diagnostic CE,
710 * convert it from
711 * Target CPU virtual address space
712 * to
713 * CE address space
714 */
Kalle Valo5e3dd152013-06-12 20:52:10 +0300715 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300716
717 remaining_bytes = orig_nbytes;
718 ce_data = ce_data_base;
719 while (remaining_bytes) {
720 /* FIXME: check cast */
721 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
722
723 /* Set up to receive directly into Target(!) address */
Michal Kazior728f95e2014-08-22 14:33:14 +0200724 ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300725 if (ret != 0)
726 goto done;
727
728 /*
729 * Request CE to send caller-supplied data that
730 * was copied to bounce buffer to Target(!) address.
731 */
732 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
733 nbytes, 0, 0);
734 if (ret != 0)
735 goto done;
736
737 i = 0;
738 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
739 &completed_nbytes,
740 &id) != 0) {
741 mdelay(1);
742
743 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
744 ret = -EBUSY;
745 goto done;
746 }
747 }
748
749 if (nbytes != completed_nbytes) {
750 ret = -EIO;
751 goto done;
752 }
753
754 if (buf != ce_data) {
755 ret = -EIO;
756 goto done;
757 }
758
759 i = 0;
760 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
761 &completed_nbytes,
762 &id, &flags) != 0) {
763 mdelay(1);
764
765 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
766 ret = -EBUSY;
767 goto done;
768 }
769 }
770
771 if (nbytes != completed_nbytes) {
772 ret = -EIO;
773 goto done;
774 }
775
776 if (buf != address) {
777 ret = -EIO;
778 goto done;
779 }
780
781 remaining_bytes -= nbytes;
782 address += nbytes;
783 ce_data += nbytes;
784 }
785
786done:
787 if (data_buf) {
Michal Kazior68c03242014-03-28 10:02:35 +0200788 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
789 ce_data_base);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300790 }
791
792 if (ret != 0)
Michal Kazior7aa7a722014-08-25 12:09:38 +0200793 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
Kalle Valo50f87a62014-03-28 09:32:52 +0200794 address, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300795
796 return ret;
797}
798
Michal Kazior0fdc14e42014-08-26 19:14:03 +0300799static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
800{
801 __le32 val = __cpu_to_le32(value);
802
803 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
804}
805
Kalle Valo5e3dd152013-06-12 20:52:10 +0300806/* Write 4B data to Target memory or register */
807static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
808 u32 data)
809{
810 /* Assume range doesn't cross this boundary */
811 if (address >= DRAM_BASE_ADDRESS)
Michal Kazior0fdc14e42014-08-26 19:14:03 +0300812 return ath10k_pci_diag_write32(ar, address, data);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300813
Kalle Valo5e3dd152013-06-12 20:52:10 +0300814 ath10k_pci_write32(ar, address, data);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300815 return 0;
816}
817
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar))
			return 0;

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	return ath10k_pci_wake_wait(ar);
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
}

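/*
 * Expected calling pattern around register access (a sketch, not a
 * definitive recipe): wake the SoC, do the MMIO, then let it sleep again.
 * ath10k_pci_wake_wait() polls with an increasing backoff (5us steps, capped
 * at 50us) up to PCIE_WAKE_TIMEOUT before giving up with -ETIMEDOUT.
 *
 *	ret = ath10k_pci_wake(ar);
 *	if (ret)
 *		return ret;
 *
 *	val = ath10k_pci_read32(ar, addr);	// some register access
 *
 *	ath10k_pci_sleep(ar);
 */
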
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (transfer_context == NULL)
			continue;

		cb->tx_completion(ar, transfer_context, transfer_id);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		cb->rx_completion(ar, skb, pipe_info->pipe_num);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items -1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}

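/*
 * All items but the last are queued with CE_SEND_FLAG_GATHER so the CE
 * treats them as a single scatter-gather transaction; only the final,
 * unflagged send triggers delivery. A hedged sketch of what a two-fragment
 * caller hands in (field names per struct ath10k_hif_sg_item; the hdr/skb
 * buffers are hypothetical):
 *
 *	struct ath10k_hif_sg_item items[2] = {
 *		{ .transfer_id = id, .transfer_context = NULL,
 *		  .vaddr = hdr, .paddr = hdr_paddr, .len = hdr_len },
 *		{ .transfer_id = id, .transfer_context = skb,
 *		  .vaddr = skb->data, .paddr = skb_paddr, .len = skb->len },
 *	};
 *
 *	ret = ath10k_pci_hif_tx_sg(ar, pipe_id, items, 2);
 */
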
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	del_timer_sync(&ar_pci->rx_post_retry);
}

static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return 0;
}

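/*
 * Worked example against the map above (illustrative): for
 * ATH10K_HTC_SVC_ID_WMI_CONTROL the loop matches the PIPEDIR_OUT entry
 * (pipe 3) and the PIPEDIR_IN entry (pipe 2), so the caller gets
 * *ul_pipe = 3, *dl_pipe = 2 and *ul_is_polled = 0, since CE3 does not set
 * CE_ATTR_DIS_INTR in host_ce_config_wlan. Only HTT tx (CE4) ends up
 * polled.
 */
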
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	/* FIXME: How to mask all MSI interrupts? */

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		synchronize_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	/* FIXME: How to unmask all MSI interrupts? */
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!netbuf)
			continue;

		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *   buffers that were enqueued for receive
 *   buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

static void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless of how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_warm_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_flush(ar);
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

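/*
 * This is the HIF hook behind the bmi.c helpers; callers do not build BMI
 * frames by hand. A hedged sketch of how it is typically reached (command
 * layout per struct bmi_cmd in bmi.h):
 *
 *	struct bmi_cmd cmd = { .id = __cpu_to_le32(BMI_DONE) };
 *
 *	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, sizeof(cmd.id),
 *					  NULL, NULL);
 *
 * Requests and responses are bounced through the kmemdup()/kzalloc() copies
 * above, so the caller's buffers never need to be DMA-able themselves.
 */
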
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn(ar, "failed to read core_ctrl: %d\n", ret);
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret) {
		ath10k_warn(ar, "failed to set target CPU interrupt mask: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

1494static int ath10k_pci_init_config(struct ath10k *ar)
1495{
1496 u32 interconnect_targ_addr;
1497 u32 pcie_state_targ_addr = 0;
1498 u32 pipe_cfg_targ_addr = 0;
1499 u32 svc_to_pipe_map = 0;
1500 u32 pcie_config_flags = 0;
1501 u32 ealloc_value;
1502 u32 ealloc_targ_addr;
1503 u32 flag2_value;
1504 u32 flag2_targ_addr;
1505 int ret = 0;
1506
1507 /* Download to Target the CE Config and the service-to-CE map */
1508 interconnect_targ_addr =
1509 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1510
1511 /* Supply Target-side CE configuration */
1512 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1513 &pcie_state_targ_addr);
1514 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001515 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001516 return ret;
1517 }
1518
1519 if (pcie_state_targ_addr == 0) {
1520 ret = -EIO;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001521 ath10k_err(ar, "Invalid pcie state addr\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001522 return ret;
1523 }
1524
1525 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1526 offsetof(struct pcie_state,
1527 pipe_cfg_addr),
1528 &pipe_cfg_targ_addr);
1529 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001530 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001531 return ret;
1532 }
1533
1534 if (pipe_cfg_targ_addr == 0) {
1535 ret = -EIO;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001536 ath10k_err(ar, "Invalid pipe cfg addr\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001537 return ret;
1538 }
1539
1540 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1541 target_ce_config_wlan,
1542 sizeof(target_ce_config_wlan));
1543
1544 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001545 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001546 return ret;
1547 }
1548
1549 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1550 offsetof(struct pcie_state,
1551 svc_to_pipe_map),
1552 &svc_to_pipe_map);
1553 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001554 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001555 return ret;
1556 }
1557
1558 if (svc_to_pipe_map == 0) {
1559 ret = -EIO;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001560 ath10k_err(ar, "Invalid svc_to_pipe map\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001561 return ret;
1562 }
1563
1564 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1565 target_service_to_ce_map_wlan,
1566 sizeof(target_service_to_ce_map_wlan));
1567 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001568 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001569 return ret;
1570 }
1571
1572 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1573 offsetof(struct pcie_state,
1574 config_flags),
1575 &pcie_config_flags);
1576 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001577 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001578 return ret;
1579 }
1580
1581 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1582
Michal Kazior0fdc14e42014-08-26 19:14:03 +03001583 ret = ath10k_pci_diag_write_access(ar, pcie_state_targ_addr +
Kalle Valo5e3dd152013-06-12 20:52:10 +03001584 offsetof(struct pcie_state, config_flags),
Michal Kazior0fdc14e42014-08-26 19:14:03 +03001585 pcie_config_flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001586 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001587 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001588 return ret;
1589 }
1590
1591 /* configure early allocation */
1592 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1593
1594 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1595 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001596 ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001597 return ret;
1598 }
1599
1600 /* first bank is switched to IRAM */
1601 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1602 HI_EARLY_ALLOC_MAGIC_MASK);
1603 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1604 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1605
1606 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1607 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001608 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001609 return ret;
1610 }
1611
1612 /* Tell Target to proceed with initialization */
1613 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1614
1615 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1616 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001617 ath10k_err(ar, "Failed to get option val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001618 return ret;
1619 }
1620
1621 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1622
1623 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1624 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001625 ath10k_err(ar, "Failed to set option val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001626 return ret;
1627 }
1628
1629 return 0;
1630}
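
/*
 * Every host_interest update in ath10k_pci_init_config() follows the same
 * diag-window read-modify-write shape. A hedged sketch of a helper that
 * could fold that pattern; ath10k_pci_hi_set_bits is a hypothetical name,
 * while the diag accessors and HI_ITEM() are the ones used above:
 *
 *	static int ath10k_pci_hi_set_bits(struct ath10k *ar, u32 hi_addr,
 *					  u32 bits)
 *	{
 *		u32 val;
 *		int ret;
 *
 *		ret = ath10k_pci_diag_read_access(ar, hi_addr, &val);
 *		if (ret)
 *			return ret;
 *
 *		return ath10k_pci_diag_write_access(ar, hi_addr, val | bits);
 *	}
 *
 * With it, the hi_option_flag2 update would reduce to:
 *
 *	ret = ath10k_pci_hi_set_bits(ar,
 *		host_interest_item_address(HI_ITEM(hi_option_flag2)),
 *		HI_OPTION_EARLY_CFG_DONE);
 */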
1631
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001632static int ath10k_pci_alloc_ce(struct ath10k *ar)
1633{
1634 int i, ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001635
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001636 for (i = 0; i < CE_COUNT; i++) {
1637 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1638 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001639 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001640 i, ret);
1641 return ret;
1642 }
1643 }
1644
1645 return 0;
1646}
1647
1648static void ath10k_pci_free_ce(struct ath10k *ar)
1649{
1650 int i;
1651
1652 for (i = 0; i < CE_COUNT; i++)
1653 ath10k_ce_free_pipe(ar, i);
1654}
Kalle Valo5e3dd152013-06-12 20:52:10 +03001655
1656static int ath10k_pci_ce_init(struct ath10k *ar)
1657{
1658 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior87263e52013-08-27 13:08:01 +02001659 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001660 const struct ce_attr *attr;
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001661 int pipe_num, ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001662
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001663 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001664 pipe_info = &ar_pci->pipe_info[pipe_num];
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001665 pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
Kalle Valo5e3dd152013-06-12 20:52:10 +03001666 pipe_info->pipe_num = pipe_num;
1667 pipe_info->hif_ce_state = ar;
1668 attr = &host_ce_config_wlan[pipe_num];
1669
Michal Kazior145cc122014-08-22 14:23:32 +02001670 ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
1671 ath10k_pci_ce_send_done,
1672 ath10k_pci_ce_recv_data);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001673 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001674 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001675 pipe_num, ret);
1676 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001677 }
1678
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001679 if (pipe_num == CE_COUNT - 1) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001680 /*
1681 * Reserve the last CE for
1682 * diagnostic window support
1683 */
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001684 ar_pci->ce_diag = pipe_info->ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001685 continue;
1686 }
1687
1688 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1689 }
1690
Kalle Valo5e3dd152013-06-12 20:52:10 +03001691 return 0;
1692}
1693
Michal Kazior5c771e72014-08-22 14:23:34 +02001694static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001695{
Michal Kazior5c771e72014-08-22 14:23:34 +02001696 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
1697 FW_IND_EVENT_PENDING;
1698}
Kalle Valo5e3dd152013-06-12 20:52:10 +03001699
Michal Kazior5c771e72014-08-22 14:23:34 +02001700static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
1701{
1702 u32 val;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001703
Michal Kazior5c771e72014-08-22 14:23:34 +02001704 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1705 val &= ~FW_IND_EVENT_PENDING;
1706 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001707}
1708
Michal Kaziorde013572014-05-14 16:56:16 +03001709/* this function effectively clears target memory controller assert line */
1710static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1711{
1712 u32 val;
1713
1714 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1715 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1716 val | SOC_RESET_CONTROL_SI0_RST_MASK);
1717 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1718
1719 msleep(10);
1720
1721 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1722 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1723 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1724 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1725
1726 msleep(10);
1727}
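
/*
 * The SI0 sequence above is a reset-line "pulse": set the mask, read the
 * register back to post the write, give the hardware time to settle, then
 * clear the mask the same way. A hedged sketch of that pattern as a
 * standalone helper (ath10k_pci_soc_pulse_reset is an illustrative name,
 * not a driver symbol):
 *
 *	static void ath10k_pci_soc_pulse_reset(struct ath10k *ar, u32 mask)
 *	{
 *		u32 val;
 *
 *		val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
 *		ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
 *				       val | mask);
 *		val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
 *		msleep(10);
 *
 *		ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
 *				       val & ~mask);
 *		val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
 *		msleep(10);
 *	}
 */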
1728
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001729static int ath10k_pci_warm_reset(struct ath10k *ar)
1730{
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001731 u32 val;
1732
Michal Kazior7aa7a722014-08-25 12:09:38 +02001733 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001734
1735 /* debug */
1736 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1737 PCIE_INTR_CAUSE_ADDRESS);
Michal Kazior7aa7a722014-08-25 12:09:38 +02001738 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
1739 val);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001740
1741 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1742 CPU_INTR_ADDRESS);
Michal Kazior7aa7a722014-08-25 12:09:38 +02001743 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001744 val);
1745
1746 /* disable pending irqs */
1747 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1748 PCIE_INTR_ENABLE_ADDRESS, 0);
1749
1750 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1751 PCIE_INTR_CLR_ADDRESS, ~0);
1752
1753 msleep(100);
1754
1755 /* clear fw indicator */
Kalle Valob39712c2014-03-28 09:32:46 +02001756 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001757
1758 /* clear target LF timer interrupts */
1759 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1760 SOC_LF_TIMER_CONTROL0_ADDRESS);
1761 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1762 SOC_LF_TIMER_CONTROL0_ADDRESS,
1763 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1764
1765 /* reset CE */
1766 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1767 SOC_RESET_CONTROL_ADDRESS);
1768 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1769 val | SOC_RESET_CONTROL_CE_RST_MASK);
1770 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1771 SOC_RESET_CONTROL_ADDRESS);
1772 msleep(10);
1773
1774 /* unreset CE */
1775 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1776 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1777 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1778 SOC_RESET_CONTROL_ADDRESS);
1779 msleep(10);
1780
Michal Kaziorde013572014-05-14 16:56:16 +03001781 ath10k_pci_warm_reset_si0(ar);
1782
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001783 /* debug */
1784 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1785 PCIE_INTR_CAUSE_ADDRESS);
Michal Kazior7aa7a722014-08-25 12:09:38 +02001786 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
1787 val);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001788
1789 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1790 CPU_INTR_ADDRESS);
Michal Kazior7aa7a722014-08-25 12:09:38 +02001791 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001792 val);
1793
1794 /* CPU warm reset */
1795 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1796 SOC_RESET_CONTROL_ADDRESS);
1797 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1798 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1799
1800 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1801 SOC_RESET_CONTROL_ADDRESS);
Michal Kazior7aa7a722014-08-25 12:09:38 +02001802 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
1803 val);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001804
1805 msleep(100);
1806
Michal Kazior7aa7a722014-08-25 12:09:38 +02001807 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001808
Michal Kaziorc0c378f2014-08-07 11:03:28 +02001809 return 0;
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001810}
1811
1812static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
Michal Kazior8c5c5362013-07-16 09:38:50 +02001813{
1814 int ret;
1815
1816 /*
1817 * Bring the target up cleanly.
1818 *
1819 * The target may be in an undefined state with an AUX-powered Target
1820 * and a Host in WoW mode. If the Host crashes, loses power, or is
1821 * restarted (without unloading the driver) then the Target is left
1822 * (aux) powered and running. On a subsequent driver load, the Target
1823 * is in an unexpected state. We try to catch that here in order to
1824 * reset the Target and retry the probe.
1825 */
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001826 if (cold_reset)
1827 ret = ath10k_pci_cold_reset(ar);
1828 else
1829 ret = ath10k_pci_warm_reset(ar);
1830
Michal Kazior5b2589f2013-11-08 08:01:30 +01001831 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001832 ath10k_err(ar, "failed to reset target: %d\n", ret);
Michal Kazior98563d52013-11-08 08:01:33 +01001833 goto err;
Michal Kazior5b2589f2013-11-08 08:01:30 +01001834 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02001835
Michal Kazior8c5c5362013-07-16 09:38:50 +02001836 ret = ath10k_pci_ce_init(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001837 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001838 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
Michal Kaziorc0c378f2014-08-07 11:03:28 +02001839 goto err;
Michal Kaziorab977bd2013-11-25 14:06:26 +01001840 }
1841
Michal Kazior98563d52013-11-08 08:01:33 +01001842 ret = ath10k_pci_wait_for_target_init(ar);
1843 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001844 ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
Michal Kazior5c771e72014-08-22 14:23:34 +02001845 goto err_ce;
Michal Kazior98563d52013-11-08 08:01:33 +01001846 }
1847
1848 ret = ath10k_pci_init_config(ar);
1849 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001850 ath10k_err(ar, "failed to setup init config: %d\n", ret);
Michal Kazior5c771e72014-08-22 14:23:34 +02001851 goto err_ce;
Michal Kazior98563d52013-11-08 08:01:33 +01001852 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02001853
1854 ret = ath10k_pci_wake_target_cpu(ar);
1855 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001856 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
Michal Kazior5c771e72014-08-22 14:23:34 +02001857 goto err_ce;
Michal Kazior8c5c5362013-07-16 09:38:50 +02001858 }
1859
1860 return 0;
1861
1862err_ce:
1863 ath10k_pci_ce_deinit(ar);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001864 ath10k_pci_warm_reset(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001865err:
1866 return ret;
1867}
1868
Michal Kazior61c95ce2014-05-14 16:56:16 +03001869static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
1870{
1871 int i, ret;
1872
1873 /*
1874 * Sometimes warm reset succeeds after retries.
1875 *
1876 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
1877 * on the first try.
1878 */
1879 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
1880 ret = __ath10k_pci_hif_power_up(ar, false);
1881 if (ret == 0)
1882 break;
1883
Michal Kazior7aa7a722014-08-25 12:09:38 +02001884 ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
Michal Kazior61c95ce2014-05-14 16:56:16 +03001885 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
1886 }
1887
1888 return ret;
1889}
1890
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001891static int ath10k_pci_hif_power_up(struct ath10k *ar)
1892{
1893 int ret;
1894
Michal Kazior7aa7a722014-08-25 12:09:38 +02001895 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
Kalle Valo50f87a62014-03-28 09:32:52 +02001896
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001897 /*
1898 * Hardware CUS232 version 2 has some issues with cold reset and the
1899 * preferred (and safer) way to perform a device reset is through a
1900 * warm reset.
1901 *
Michal Kazior61c95ce2014-05-14 16:56:16 +03001902 * Warm reset doesn't always work though, so falling back to cold reset
1903 * may be necessary.
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001904 */
Michal Kazior61c95ce2014-05-14 16:56:16 +03001905 ret = ath10k_pci_hif_power_up_warm(ar);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001906 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001907 ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001908 ret);
1909
Kalle Valo35098462014-03-28 09:32:27 +02001910 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
1911 return ret;
1912
Michal Kazior7aa7a722014-08-25 12:09:38 +02001913 ath10k_warn(ar, "trying cold reset\n");
Kalle Valo35098462014-03-28 09:32:27 +02001914
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001915 ret = __ath10k_pci_hif_power_up(ar, true);
1916 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001917 ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001918 ret);
1919 return ret;
1920 }
1921 }
1922
1923 return 0;
1924}
1925
Michal Kazior8c5c5362013-07-16 09:38:50 +02001926static void ath10k_pci_hif_power_down(struct ath10k *ar)
1927{
Michal Kazior7aa7a722014-08-25 12:09:38 +02001928 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001929
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001930 ath10k_pci_warm_reset(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001931}
1932
Michal Kazior8cd13ca2013-07-16 09:38:54 +02001933#ifdef CONFIG_PM
1934
1935#define ATH10K_PCI_PM_CONTROL 0x44
1936
1937static int ath10k_pci_hif_suspend(struct ath10k *ar)
1938{
1939 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1940 struct pci_dev *pdev = ar_pci->pdev;
1941 u32 val;
1942
1943 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1944
1945 if ((val & 0x000000ff) != 0x3) {
1946 pci_save_state(pdev);
1947 pci_disable_device(pdev);
1948 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1949 (val & 0xffffff00) | 0x03);
1950 }
1951
1952 return 0;
1953}
1954
1955static int ath10k_pci_hif_resume(struct ath10k *ar)
1956{
1957 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1958 struct pci_dev *pdev = ar_pci->pdev;
1959 u32 val;
1960
1961 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1962
1963 if ((val & 0x000000ff) != 0) {
1964 pci_restore_state(pdev);
1965 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1966 val & 0xffffff00);
1967 /*
1968 * Suspend/Resume resets the PCI configuration space,
1969 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1970 * to keep PCI Tx retries from interfering with C3 CPU state
1971 */
1972 pci_read_config_dword(pdev, 0x40, &val);
1973
1974 if ((val & 0x0000ff00) != 0)
1975 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1976 }
1977
1978 return 0;
1979}
1980#endif
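
/*
 * The suspend/resume hooks above program the PowerState field (bits 1:0 of
 * the PCI power management control/status register, here at config offset
 * 0x44) directly: 0x3 selects D3hot and 0x0 selects D0. A hedged sketch of
 * the state check factored out (ath10k_pci_get_power_state is an
 * illustrative name, not a driver symbol):
 *
 *	static u8 ath10k_pci_get_power_state(struct pci_dev *pdev)
 *	{
 *		u32 val;
 *
 *		pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
 *		return val & 0x3;
 *	}
 */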
1981
Kalle Valo5e3dd152013-06-12 20:52:10 +03001982static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
Michal Kazior726346f2014-02-27 18:50:04 +02001983 .tx_sg = ath10k_pci_hif_tx_sg,
Kalle Valo5e3dd152013-06-12 20:52:10 +03001984 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
1985 .start = ath10k_pci_hif_start,
1986 .stop = ath10k_pci_hif_stop,
1987 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
1988 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
1989 .send_complete_check = ath10k_pci_hif_send_complete_check,
Michal Kaziore799bbf2013-07-05 16:15:12 +03001990 .set_callbacks = ath10k_pci_hif_set_callbacks,
Kalle Valo5e3dd152013-06-12 20:52:10 +03001991 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
Michal Kazior8c5c5362013-07-16 09:38:50 +02001992 .power_up = ath10k_pci_hif_power_up,
1993 .power_down = ath10k_pci_hif_power_down,
Michal Kazior8cd13ca2013-07-16 09:38:54 +02001994#ifdef CONFIG_PM
1995 .suspend = ath10k_pci_hif_suspend,
1996 .resume = ath10k_pci_hif_resume,
1997#endif
Kalle Valo5e3dd152013-06-12 20:52:10 +03001998};
1999
2000static void ath10k_pci_ce_tasklet(unsigned long ptr)
2001{
Michal Kazior87263e52013-08-27 13:08:01 +02002002 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002003 struct ath10k_pci *ar_pci = pipe->ar_pci;
2004
2005 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2006}
2007
2008static void ath10k_msi_err_tasklet(unsigned long data)
2009{
2010 struct ath10k *ar = (struct ath10k *)data;
2011
Michal Kazior5c771e72014-08-22 14:23:34 +02002012 if (!ath10k_pci_has_fw_crashed(ar)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002013 ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
Michal Kazior5c771e72014-08-22 14:23:34 +02002014 return;
2015 }
2016
2017 ath10k_pci_fw_crashed_clear(ar);
2018 ath10k_pci_fw_crashed_dump(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002019}
2020
2021/*
2022 * Handler for a per-engine interrupt on a PARTICULAR CE.
2023 * This is used in cases where each CE has a private MSI interrupt.
2024 */
2025static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2026{
2027 struct ath10k *ar = arg;
2028 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2029 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2030
Dan Carpentere5742672013-06-18 10:28:46 +03002031 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002032 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
2033 ce_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002034 return IRQ_HANDLED;
2035 }
2036
2037 /*
2038 * NOTE: We are able to derive ce_id from irq because we
2039 * use a one-to-one mapping for CEs 0..5.
2040 * CEs 6 & 7 do not use interrupts at all.
2041 *
2042 * This mapping must be kept in sync with the mapping
2043 * used by firmware.
2044 */
2045 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2046 return IRQ_HANDLED;
2047}
2048
2049static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2050{
2051 struct ath10k *ar = arg;
2052 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2053
2054 tasklet_schedule(&ar_pci->msi_fw_err);
2055 return IRQ_HANDLED;
2056}
2057
2058/*
2059 * Top-level interrupt handler for all PCI interrupts from a Target.
2060 * When a block of MSI interrupts is allocated, this top-level handler
2061 * is not used; instead, we directly call the correct sub-handler.
2062 */
2063static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2064{
2065 struct ath10k *ar = arg;
2066 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2067
2068 if (ar_pci->num_msi_intrs == 0) {
Michal Kaziore5398872013-11-25 14:06:20 +01002069 if (!ath10k_pci_irq_pending(ar))
2070 return IRQ_NONE;
2071
Michal Kazior26852182013-11-25 14:06:25 +01002072 ath10k_pci_disable_and_clear_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002073 }
2074
2075 tasklet_schedule(&ar_pci->intr_tq);
2076
2077 return IRQ_HANDLED;
2078}
2079
2080static void ath10k_pci_tasklet(unsigned long data)
2081{
2082 struct ath10k *ar = (struct ath10k *)data;
2083 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2084
Michal Kazior5c771e72014-08-22 14:23:34 +02002085 if (ath10k_pci_has_fw_crashed(ar)) {
2086 ath10k_pci_fw_crashed_clear(ar);
2087 ath10k_pci_fw_crashed_dump(ar);
2088 return;
2089 }
2090
Kalle Valo5e3dd152013-06-12 20:52:10 +03002091 ath10k_ce_per_engine_service_any(ar);
2092
Michal Kazior26852182013-11-25 14:06:25 +01002093 /* Re-enable legacy irq that was disabled in the irq handler */
2094 if (ar_pci->num_msi_intrs == 0)
2095 ath10k_pci_enable_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002096}
2097
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002098static int ath10k_pci_request_irq_msix(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002099{
2100 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002101 int ret, i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002102
2103 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2104 ath10k_pci_msi_fw_handler,
2105 IRQF_SHARED, "ath10k_pci", ar);
Michal Kazior591ecdb2013-07-31 10:55:15 +02002106 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002107 ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
Michal Kazior591ecdb2013-07-31 10:55:15 +02002108 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002109 return ret;
Michal Kazior591ecdb2013-07-31 10:55:15 +02002110 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002111
2112 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2113 ret = request_irq(ar_pci->pdev->irq + i,
2114 ath10k_pci_per_engine_handler,
2115 IRQF_SHARED, "ath10k_pci", ar);
2116 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002117 ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002118 ar_pci->pdev->irq + i, ret);
2119
Michal Kazior87b14232013-06-26 08:50:50 +02002120 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2121 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002122
Michal Kazior87b14232013-06-26 08:50:50 +02002123 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002124 return ret;
2125 }
2126 }
2127
Kalle Valo5e3dd152013-06-12 20:52:10 +03002128 return 0;
2129}
2130
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002131static int ath10k_pci_request_irq_msi(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002132{
2133 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2134 int ret;
2135
2136 ret = request_irq(ar_pci->pdev->irq,
2137 ath10k_pci_interrupt_handler,
2138 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002139 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002140 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002141 ar_pci->pdev->irq, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002142 return ret;
Kalle Valof3782742013-10-17 11:36:15 +03002143 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002144
Kalle Valo5e3dd152013-06-12 20:52:10 +03002145 return 0;
2146}
2147
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002148static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002149{
2150 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002151 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002152
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002153 ret = request_irq(ar_pci->pdev->irq,
2154 ath10k_pci_interrupt_handler,
2155 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002156 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002157 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002158 ar_pci->pdev->irq, ret);
Kalle Valof3782742013-10-17 11:36:15 +03002159 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002160 }
2161
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002162 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002163}
2164
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002165static int ath10k_pci_request_irq(struct ath10k *ar)
2166{
2167 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2168
2169 switch (ar_pci->num_msi_intrs) {
2170 case 0:
2171 return ath10k_pci_request_irq_legacy(ar);
2172 case 1:
2173 return ath10k_pci_request_irq_msi(ar);
2174 case MSI_NUM_REQUEST:
2175 return ath10k_pci_request_irq_msix(ar);
2176 }
2177
Michal Kazior7aa7a722014-08-25 12:09:38 +02002178 ath10k_warn(ar, "unknown irq configuration upon request\n");
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002179 return -EINVAL;
2180}
2181
2182static void ath10k_pci_free_irq(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002183{
2184 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2185 int i;
2186
2187 /* There's at least one interrupt regardless of whether it's legacy INTR,
2188 * MSI or MSI-X */
2189 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2190 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002191}
2192
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002193static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2194{
2195 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2196 int i;
2197
2198 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2199 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2200 (unsigned long)ar);
2201
2202 for (i = 0; i < CE_COUNT; i++) {
2203 ar_pci->pipe_info[i].ar_pci = ar_pci;
2204 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2205 (unsigned long)&ar_pci->pipe_info[i]);
2206 }
2207}
2208
2209static int ath10k_pci_init_irq(struct ath10k *ar)
2210{
2211 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2212 int ret;
2213
2214 ath10k_pci_init_irq_tasklets(ar);
2215
Michal Kazior403d6272014-08-22 14:23:31 +02002216 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002217 ath10k_info(ar, "limiting irq mode to: %d\n",
2218 ath10k_pci_irq_mode);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002219
2220 /* Try MSI-X */
Michal Kazior0edf2572014-08-07 11:03:29 +02002221 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002222 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
Alexander Gordeev5ad68672014-02-13 17:50:02 +02002223 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2224 ar_pci->num_msi_intrs);
2225 if (ret > 0)
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002226 return 0;
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002227
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002228 /* fall-through */
2229 }
2230
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002231 /* Try MSI */
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002232 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2233 ar_pci->num_msi_intrs = 1;
2234 ret = pci_enable_msi(ar_pci->pdev);
2235 if (ret == 0)
2236 return 0;
2237
2238 /* fall-through */
2239 }
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002240
2241 /* Try legacy irq
2242 *
2243 * A potential race occurs here: the CORE_BASE write
2244 * depends on the target correctly decoding the AXI address, but
2245 * the host won't know when the target writes BAR to CORE_CTRL.
2246 * The write might get lost if the target has NOT written BAR.
2247 * For now, fix the race by repeating the write in the
2248 * synchronization check below. */
2249 ar_pci->num_msi_intrs = 0;
2250
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002251 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2252 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002253
2254 return 0;
2255}
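
/*
 * The fallback order above (MSI-X, then MSI, then legacy) can be
 * short-circuited with the irq_mode module parameter that is logged from
 * ath10k_pci_probe(). A usage sketch, assuming the module is loaded as
 * ath10k_pci and using the documented values (1 forces legacy, 2 forces
 * MSI):
 *
 *	modprobe ath10k_pci irq_mode=1
 */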
2256
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002257static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002258{
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002259 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2260 0);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002261}
2262
2263static int ath10k_pci_deinit_irq(struct ath10k *ar)
2264{
2265 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2266
2267 switch (ar_pci->num_msi_intrs) {
2268 case 0:
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002269 ath10k_pci_deinit_irq_legacy(ar);
2270 return 0;
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002271 case 1:
2272 /* fall-through */
2273 case MSI_NUM_REQUEST:
2274 pci_disable_msi(ar_pci->pdev);
2275 return 0;
Alexander Gordeevbb8b6212014-02-13 17:50:01 +02002276 default:
2277 pci_disable_msi(ar_pci->pdev);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002278 }
2279
Michal Kazior7aa7a722014-08-25 12:09:38 +02002280 ath10k_warn(ar, "unknown irq configuration upon deinit\n");
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002281 return -EINVAL;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002282}
2283
Michal Kaziord7fb47f2013-11-08 08:01:26 +01002284static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002285{
2286 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo0399eca2014-03-28 09:32:21 +02002287 unsigned long timeout;
Kalle Valo0399eca2014-03-28 09:32:21 +02002288 u32 val;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002289
Michal Kazior7aa7a722014-08-25 12:09:38 +02002290 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002291
Kalle Valo0399eca2014-03-28 09:32:21 +02002292 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2293
2294 do {
2295 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2296
Michal Kazior7aa7a722014-08-25 12:09:38 +02002297 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2298 val);
Kalle Valo50f87a62014-03-28 09:32:52 +02002299
Kalle Valo0399eca2014-03-28 09:32:21 +02002300 /* target should never return this */
2301 if (val == 0xffffffff)
2302 continue;
2303
Michal Kazior7710cd22014-04-23 19:30:04 +03002304 /* the device has crashed so don't bother trying anymore */
2305 if (val & FW_IND_EVENT_PENDING)
2306 break;
2307
Kalle Valo0399eca2014-03-28 09:32:21 +02002308 if (val & FW_IND_INITIALIZED)
2309 break;
2310
Kalle Valo5e3dd152013-06-12 20:52:10 +03002311 if (ar_pci->num_msi_intrs == 0)
2312 /* Fix potential race by repeating CORE_BASE writes */
Michal Kaziorc947a9e2014-08-22 14:23:30 +02002313 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2314 PCIE_INTR_ENABLE_ADDRESS,
2315 PCIE_INTR_FIRMWARE_MASK |
2316 PCIE_INTR_CE_MASK_ALL);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002317
Kalle Valo0399eca2014-03-28 09:32:21 +02002318 mdelay(10);
2319 } while (time_before(jiffies, timeout));
2320
Michal Kazior6a4f6e12014-04-23 19:30:03 +03002321 if (val == 0xffffffff) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002322 ath10k_err(ar, "failed to read device register, device is gone\n");
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002323 return -EIO;
Michal Kazior6a4f6e12014-04-23 19:30:03 +03002324 }
2325
Michal Kazior7710cd22014-04-23 19:30:04 +03002326 if (val & FW_IND_EVENT_PENDING) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002327 ath10k_warn(ar, "device has crashed during init\n");
Michal Kazior5c771e72014-08-22 14:23:34 +02002328 ath10k_pci_fw_crashed_clear(ar);
Kalle Valo0e9848c2014-08-25 08:37:37 +03002329 ath10k_pci_fw_crashed_dump(ar);
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002330 return -ECOMM;
Michal Kazior7710cd22014-04-23 19:30:04 +03002331 }
2332
Michal Kazior6a4f6e12014-04-23 19:30:03 +03002333 if (!(val & FW_IND_INITIALIZED)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002334 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
Kalle Valo0399eca2014-03-28 09:32:21 +02002335 val);
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002336 return -ETIMEDOUT;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002337 }
2338
Michal Kazior7aa7a722014-08-25 12:09:38 +02002339 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002340 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002341}
2342
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002343static int ath10k_pci_cold_reset(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002344{
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002345 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002346 u32 val;
2347
Michal Kazior7aa7a722014-08-25 12:09:38 +02002348 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002349
2350 /* Put Target, including PCIe, into RESET. */
Kalle Valoe479ed42013-09-01 10:01:53 +03002351 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002352 val |= 1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002353 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002354
2355 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002356 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002357 RTC_STATE_COLD_RESET_MASK)
2358 break;
2359 msleep(1);
2360 }
2361
2362 /* Pull Target, including PCIe, out of RESET. */
2363 val &= ~1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002364 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002365
2366 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002367 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002368 RTC_STATE_COLD_RESET_MASK))
2369 break;
2370 msleep(1);
2371 }
2372
Michal Kazior7aa7a722014-08-25 12:09:38 +02002373 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
Kalle Valo50f87a62014-03-28 09:32:52 +02002374
Michal Kazior5b2589f2013-11-08 08:01:30 +01002375 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002376}
2377
Michal Kazior2986e3e2014-08-07 11:03:30 +02002378static int ath10k_pci_claim(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002379{
Michal Kazior2986e3e2014-08-07 11:03:30 +02002380 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2381 struct pci_dev *pdev = ar_pci->pdev;
2382 u32 lcr_val;
2383 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002384
2385 pci_set_drvdata(pdev, ar);
2386
Kalle Valo5e3dd152013-06-12 20:52:10 +03002387 ret = pci_enable_device(pdev);
2388 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002389 ath10k_err(ar, "failed to enable pci device: %d\n", ret);
Michal Kazior2986e3e2014-08-07 11:03:30 +02002390 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002391 }
2392
Kalle Valo5e3dd152013-06-12 20:52:10 +03002393 ret = pci_request_region(pdev, BAR_NUM, "ath");
2394 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002395 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
Michal Kazior2986e3e2014-08-07 11:03:30 +02002396 ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002397 goto err_device;
2398 }
2399
Michal Kazior2986e3e2014-08-07 11:03:30 +02002400 /* Target expects 32 bit DMA. Enforce it. */
Kalle Valo5e3dd152013-06-12 20:52:10 +03002401 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2402 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002403 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002404 goto err_region;
2405 }
2406
2407 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2408 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002409 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
Michal Kazior2986e3e2014-08-07 11:03:30 +02002410 ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002411 goto err_region;
2412 }
2413
Kalle Valo5e3dd152013-06-12 20:52:10 +03002414 pci_set_master(pdev);
2415
Michal Kazior2986e3e2014-08-07 11:03:30 +02002416 /* Workaround: Disable ASPM */
Kalle Valo5e3dd152013-06-12 20:52:10 +03002417 pci_read_config_dword(pdev, 0x80, &lcr_val);
2418 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2419
2420 /* Arrange for access to Target SoC registers. */
Michal Kazior2986e3e2014-08-07 11:03:30 +02002421 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2422 if (!ar_pci->mem) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002423 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002424 ret = -EIO;
2425 goto err_master;
2426 }
2427
Michal Kazior7aa7a722014-08-25 12:09:38 +02002428 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
Michal Kazior2986e3e2014-08-07 11:03:30 +02002429 return 0;
2430
2431err_master:
2432 pci_clear_master(pdev);
2433
2434err_region:
2435 pci_release_region(pdev, BAR_NUM);
2436
2437err_device:
2438 pci_disable_device(pdev);
2439
2440 return ret;
2441}
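
/*
 * ath10k_pci_claim() uses the kernel's mirrored-unwind idiom: each
 * acquired resource gets an err_* label that releases it, in reverse order
 * of acquisition, and ath10k_pci_release() repeats the same teardown for
 * the success path. A minimal sketch of the shape, with placeholder names
 * (acquire_foo/acquire_bar/release_foo are not driver functions):
 *
 *	ret = acquire_foo();
 *	if (ret)
 *		return ret;
 *
 *	ret = acquire_bar();
 *	if (ret)
 *		goto err_foo;
 *
 *	return 0;
 *
 *	err_foo:
 *	release_foo();
 *	return ret;
 */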
2442
2443static void ath10k_pci_release(struct ath10k *ar)
2444{
2445 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2446 struct pci_dev *pdev = ar_pci->pdev;
2447
2448 pci_iounmap(pdev, ar_pci->mem);
2449 pci_release_region(pdev, BAR_NUM);
2450 pci_clear_master(pdev);
2451 pci_disable_device(pdev);
2452}
2453
Kalle Valo5e3dd152013-06-12 20:52:10 +03002454static int ath10k_pci_probe(struct pci_dev *pdev,
2455 const struct pci_device_id *pci_dev)
2456{
Kalle Valo5e3dd152013-06-12 20:52:10 +03002457 int ret = 0;
2458 struct ath10k *ar;
2459 struct ath10k_pci *ar_pci;
Michal Kazior2986e3e2014-08-07 11:03:30 +02002460 u32 chip_id;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002461
Michal Kaziore7b54192014-08-07 11:03:27 +02002462 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
2463 &ath10k_pci_hif_ops);
2464 if (!ar) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002465 dev_err(&pdev->dev, "failed to allocate core\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002466 return -ENOMEM;
Michal Kaziore7b54192014-08-07 11:03:27 +02002467 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002468
Michal Kazior7aa7a722014-08-25 12:09:38 +02002469 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
2470
Michal Kaziore7b54192014-08-07 11:03:27 +02002471 ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002472 ar_pci->pdev = pdev;
2473 ar_pci->dev = &pdev->dev;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002474 ar_pci->ar = ar;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002475
2476 spin_lock_init(&ar_pci->ce_lock);
Michal Kazior728f95e2014-08-22 14:33:14 +02002477 setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2478 (unsigned long)ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002479
Michal Kazior2986e3e2014-08-07 11:03:30 +02002480 ret = ath10k_pci_claim(ar);
Kalle Valoe01ae682013-09-01 11:22:14 +03002481 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002482 ath10k_err(ar, "failed to claim device: %d\n", ret);
Michal Kaziore7b54192014-08-07 11:03:27 +02002483 goto err_core_destroy;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002484 }
2485
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002486 ret = ath10k_pci_wake(ar);
Kalle Valoe01ae682013-09-01 11:22:14 +03002487 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002488 ath10k_err(ar, "failed to wake up: %d\n", ret);
Michal Kazior2986e3e2014-08-07 11:03:30 +02002489 goto err_release;
Kalle Valoe01ae682013-09-01 11:22:14 +03002490 }
2491
Kalle Valo233eb972013-10-16 16:46:11 +03002492 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002493 if (chip_id == 0xffffffff) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002494 ath10k_err(ar, "failed to get chip id\n");
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002495 goto err_sleep;
2496 }
Kalle Valoe01ae682013-09-01 11:22:14 +03002497
Michal Kazior25d0dbc2014-03-28 10:02:38 +02002498 ret = ath10k_pci_alloc_ce(ar);
2499 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002500 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2501 ret);
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002502 goto err_sleep;
Michal Kazior25d0dbc2014-03-28 10:02:38 +02002503 }
2504
Michal Kazior403d6272014-08-22 14:23:31 +02002505 ath10k_pci_ce_deinit(ar);
2506
2507 ret = ath10k_ce_disable_interrupts(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002508 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002509 ath10k_err(ar, "failed to disable copy engine interrupts: %d\n",
Michal Kazior403d6272014-08-22 14:23:31 +02002510 ret);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02002511 goto err_free_ce;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002512 }
2513
Michal Kazior5c771e72014-08-22 14:23:34 +02002514 /* Workaround: There's no known way to mask all possible interrupts via
2515 * the device CSR. The only way to make sure the device doesn't assert
2516 * interrupts is to reset it. Interrupts are then disabled on the host
2517 * after handlers are registered.
2518 */
2519 ath10k_pci_warm_reset(ar);
2520
Michal Kazior403d6272014-08-22 14:23:31 +02002521 ret = ath10k_pci_init_irq(ar);
2522 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002523 ath10k_err(ar, "failed to init irqs: %d\n", ret);
Michal Kazior403d6272014-08-22 14:23:31 +02002524 goto err_free_ce;
2525 }
2526
Michal Kazior7aa7a722014-08-25 12:09:38 +02002527 ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
Michal Kazior403d6272014-08-22 14:23:31 +02002528 ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
2529 ath10k_pci_irq_mode, ath10k_pci_reset_mode);
2530
Michal Kazior5c771e72014-08-22 14:23:34 +02002531 ret = ath10k_pci_request_irq(ar);
Michal Kazior403d6272014-08-22 14:23:31 +02002532 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002533 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
Michal Kazior403d6272014-08-22 14:23:31 +02002534 goto err_deinit_irq;
2535 }
2536
Michal Kazior5c771e72014-08-22 14:23:34 +02002537 /* This shouldn't race as the device has been reset above. */
2538 ath10k_pci_irq_disable(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002539
2540 ret = ath10k_core_register(ar, chip_id);
2541 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002542 ath10k_err(ar, "failed to register driver core: %d\n", ret);
Michal Kazior5c771e72014-08-22 14:23:34 +02002543 goto err_free_irq;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002544 }
2545
2546 return 0;
2547
Michal Kazior5c771e72014-08-22 14:23:34 +02002548err_free_irq:
2549 ath10k_pci_free_irq(ar);
2550
Michal Kazior403d6272014-08-22 14:23:31 +02002551err_deinit_irq:
2552 ath10k_pci_deinit_irq(ar);
2553
Michal Kazior25d0dbc2014-03-28 10:02:38 +02002554err_free_ce:
2555 ath10k_pci_free_ce(ar);
Michal Kazior2986e3e2014-08-07 11:03:30 +02002556
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002557err_sleep:
2558 ath10k_pci_sleep(ar);
Michal Kazior2986e3e2014-08-07 11:03:30 +02002559
2560err_release:
2561 ath10k_pci_release(ar);
2562
Michal Kaziore7b54192014-08-07 11:03:27 +02002563err_core_destroy:
Kalle Valo5e3dd152013-06-12 20:52:10 +03002564 ath10k_core_destroy(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002565
2566 return ret;
2567}
2568
2569static void ath10k_pci_remove(struct pci_dev *pdev)
2570{
2571 struct ath10k *ar = pci_get_drvdata(pdev);
2572 struct ath10k_pci *ar_pci;
2573
Michal Kazior7aa7a722014-08-25 12:09:38 +02002574 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002575
2576 if (!ar)
2577 return;
2578
2579 ar_pci = ath10k_pci_priv(ar);
2580
2581 if (!ar_pci)
2582 return;
2583
Kalle Valo5e3dd152013-06-12 20:52:10 +03002584 ath10k_core_unregister(ar);
Michal Kazior5c771e72014-08-22 14:23:34 +02002585 ath10k_pci_free_irq(ar);
Michal Kazior403d6272014-08-22 14:23:31 +02002586 ath10k_pci_deinit_irq(ar);
2587 ath10k_pci_ce_deinit(ar);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02002588 ath10k_pci_free_ce(ar);
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002589 ath10k_pci_sleep(ar);
Michal Kazior2986e3e2014-08-07 11:03:30 +02002590 ath10k_pci_release(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002591 ath10k_core_destroy(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002592}
2593
Kalle Valo5e3dd152013-06-12 20:52:10 +03002594MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2595
2596static struct pci_driver ath10k_pci_driver = {
2597 .name = "ath10k_pci",
2598 .id_table = ath10k_pci_id_table,
2599 .probe = ath10k_pci_probe,
2600 .remove = ath10k_pci_remove,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002601};
2602
2603static int __init ath10k_pci_init(void)
2604{
2605 int ret;
2606
2607 ret = pci_register_driver(&ath10k_pci_driver);
2608 if (ret)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002609 printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
2610 ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002611
2612 return ret;
2613}
2614module_init(ath10k_pci_init);
2615
2616static void __exit ath10k_pci_exit(void)
2617{
2618 pci_unregister_driver(&ath10k_pci_driver);
2619}
2620
2621module_exit(ath10k_pci_exit);
2622
2623MODULE_AUTHOR("Qualcomm Atheros");
2624MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2625MODULE_LICENSE("Dual BSD/GPL");
Michal Kazior24c88f72014-07-25 13:32:17 +02002626MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002627MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);