/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

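/* Example (hypothetical shell session, assuming the module is built as
 * ath10k_pci): force legacy interrupts and warm-only resets at load time:
 *
 *   modprobe ath10k_pci irq_mode=1 reset_mode=1
 *
 * With permissions 0644 both knobs also show up under
 * /sys/module/ath10k_pci/parameters/, though the driver only samples them
 * while probing a device.
 */
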
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define QCA988X_2_0_DEVICE_ID	(0x003c)
#define QCA6174_2_1_DEVICE_ID	(0x003e)
#define QCA99X0_2_0_DEVICE_ID	(0x0040)

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9: target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8: target->host pktlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9: target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* It is not necessary to send target wlan configuration for CE10 and
	 * CE11 as these CEs are not actively used in target.
	 */
};

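/* NOTE: unlike host_ce_config_wlan, target_ce_config_wlan above and the
 * service map below are stored little-endian (__cpu_to_le32) on purpose:
 * they are copied as-is into target memory when the device is configured
 * at boot, and the target always expects LE regardless of host byte order.
 */
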
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

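/* Poll until the chip's RTC state machine reports fully-on. The delay
 * between reads starts at 5 us and grows by 5 us per iteration (capped at
 * 50 us), bounded overall by PCIE_WAKE_TIMEOUT.
 */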
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar))
			return 0;

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

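/* Dropping the last wake reference does not put the chip to sleep right
 * away; it only arms ps_timer. The actual transition happens in
 * ath10k_pci_ps_timer() once the grace period expires with the refcount
 * still at zero, which keeps back-to-back register accesses cheap.
 */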
static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}

u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs > 1)
		return "msi-x";

	if (ar_pci->num_msi_intrs == 1)
		return "msi";

	return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	lockdep_assert_held(&ar_pci->ce_lock);

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	if (ret) {
		ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

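/* Keep the RX ring topped up with freshly mapped skbs. If an allocation or
 * post fails here, the rx_post_retry timer re-runs ath10k_pci_rx_post()
 * after ATH10K_PCI_RX_POST_RETRY_MS instead of failing the pipe outright.
 */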
static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	lockdep_assert_held(&ar_pci->ce_lock);

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	while (num--) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
	}
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	__ath10k_pci_rx_post_pipe(pipe);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	spin_lock_bh(&ar_pci->ce_lock);
	for (i = 0; i < CE_COUNT; i++)
		__ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}

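/* Translate a target CPU virtual address into the address space visible to
 * the diagnostic CE: the low 20 bits come from the address itself, bit 20
 * is always set, and the upper bits come from CORE_CTRL (QCA988X/QCA6174)
 * or the PCIE BAR register (QCA99X0).
 */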
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA6174:
		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					 CORE_CTRL_ADDRESS) &
		       0x7ff) << 21;
		break;
	case ATH10K_HW_QCA99X0:
		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
		break;
	}

	val |= 0x100000 | (addr & 0xfffff);
	return val;
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
					    0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32)address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0)
		memcpy(data, data_buf, orig_nbytes);
	else
		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
			    address, ret);

	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

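/* Convenience wrapper: `src` is a member name in the firmware's
 * host_interest struct; HI_ITEM() (from targaddrs.h) converts it to that
 * member's offset, and the helper above chases the pointer stored there
 * before copying `len` bytes back to the host.
 */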
#define ath10k_pci_diag_read_hi(ar, dest, src, len)	\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 * 1) 4-byte alignment
	 * 2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff_head list;
	struct sk_buff *skb;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
					     &nbytes, &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		cb->tx_completion(ar, skb);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		cb->rx_completion(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

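/* Queue a scatter-gather list as one logical transfer: every item except
 * the last is posted with CE_SEND_FLAG_GATHER, and on any failure the
 * already queued descriptors are rolled back with __ath10k_ce_send_revert()
 * before the lock is released.
 */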
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items - 1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}

static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				    size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	del_timer_sync(&ar_pci->rx_post_retry);
}

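/* Resolve an HTC service id to its UL (host->target) and DL (target->host)
 * pipe numbers by scanning target_service_to_ce_map_wlan; the entries are
 * kept little-endian there, hence the __le32_to_cpu() on every compare.
 */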
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return 0;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

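/* Gate the firmware interrupt at the SoC: clearing or setting
 * CORE_CTRL_PCIE_REG_31_MASK in CORE_CTRL controls whether the target can
 * raise its FW (crash/error) interrupt towards the host. No equivalent is
 * wired up for QCA99X0 yet, hence the empty TODO cases below.
 */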
Michal Kazior7c0f0e32014-10-20 14:14:38 +02001427static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1428{
1429 u32 val;
1430
Vasanthakumar Thiagarajan6e4202c2015-06-18 12:31:06 +05301431 switch (ar->hw_rev) {
1432 case ATH10K_HW_QCA988X:
1433 case ATH10K_HW_QCA6174:
1434 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1435 CORE_CTRL_ADDRESS);
1436 val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1437 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1438 CORE_CTRL_ADDRESS, val);
1439 break;
1440 case ATH10K_HW_QCA99X0:
1441 /* TODO: Find appropriate register configuration for QCA99X0
1442 * to mask irq/MSI.
1443 */
1444 break;
1445 }
Michal Kazior7c0f0e32014-10-20 14:14:38 +02001446}
1447
1448static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1449{
1450 u32 val;
1451
Vasanthakumar Thiagarajan6e4202c2015-06-18 12:31:06 +05301452 switch (ar->hw_rev) {
1453 case ATH10K_HW_QCA988X:
1454 case ATH10K_HW_QCA6174:
1455 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1456 CORE_CTRL_ADDRESS);
1457 val |= CORE_CTRL_PCIE_REG_31_MASK;
1458 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1459 CORE_CTRL_ADDRESS, val);
1460 break;
1461 case ATH10K_HW_QCA99X0:
1462 /* TODO: Find appropriate register configuration for QCA99X0
1463 * to unmask irq/MSI.
1464 */
1465 break;
1466 }
Michal Kazior7c0f0e32014-10-20 14:14:38 +02001467}
1468
Michal Kaziorec5ba4d2014-08-22 14:23:33 +02001469static void ath10k_pci_irq_disable(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001470{
Michal Kazior7c0f0e32014-10-20 14:14:38 +02001471 ath10k_ce_disable_interrupts(ar);
1472 ath10k_pci_disable_and_clear_legacy_irq(ar);
1473 ath10k_pci_irq_msi_fw_mask(ar);
1474}
1475
1476static void ath10k_pci_irq_sync(struct ath10k *ar)
1477{
Kalle Valo5e3dd152013-06-12 20:52:10 +03001478 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kaziorec5ba4d2014-08-22 14:23:33 +02001479 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001480
Michal Kaziorec5ba4d2014-08-22 14:23:33 +02001481 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
1482 synchronize_irq(ar_pci->pdev->irq + i);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001483}
1484
Michal Kaziorec5ba4d2014-08-22 14:23:33 +02001485static void ath10k_pci_irq_enable(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001486{
Michal Kaziorec5ba4d2014-08-22 14:23:33 +02001487 ath10k_ce_enable_interrupts(ar);
Michal Kaziore75db4e2014-08-28 22:14:16 +03001488 ath10k_pci_enable_legacy_irq(ar);
Michal Kazior7c0f0e32014-10-20 14:14:38 +02001489 ath10k_pci_irq_msi_fw_unmask(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001490}
1491
1492static int ath10k_pci_hif_start(struct ath10k *ar)
1493{
Janusz Dziedzic76d870e2015-05-18 09:38:16 +00001494 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior7aa7a722014-08-25 12:09:38 +02001495 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001496
Michal Kaziorec5ba4d2014-08-22 14:23:33 +02001497 ath10k_pci_irq_enable(ar);
Michal Kazior728f95e2014-08-22 14:33:14 +02001498 ath10k_pci_rx_post(ar);
Kalle Valo50f87a62014-03-28 09:32:52 +02001499
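	/* Restore the PCIe link control (ASPM) bits that were saved and
	 * cleared in ath10k_pci_hif_power_up() for the duration of boot.
	 */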
Janusz Dziedzic76d870e2015-05-18 09:38:16 +00001500 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1501 ar_pci->link_ctl);
1502
Kalle Valo5e3dd152013-06-12 20:52:10 +03001503 return 0;
1504}
1505
Michal Kazior099ac7c2014-10-28 10:32:05 +01001506static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001507{
1508 struct ath10k *ar;
Michal Kazior099ac7c2014-10-28 10:32:05 +01001509 struct ath10k_ce_pipe *ce_pipe;
1510 struct ath10k_ce_ring *ce_ring;
1511 struct sk_buff *skb;
1512 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001513
Michal Kazior099ac7c2014-10-28 10:32:05 +01001514 ar = pci_pipe->hif_ce_state;
1515 ce_pipe = pci_pipe->ce_hdl;
1516 ce_ring = ce_pipe->dest_ring;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001517
Michal Kazior099ac7c2014-10-28 10:32:05 +01001518 if (!ce_ring)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001519 return;
1520
Michal Kazior099ac7c2014-10-28 10:32:05 +01001521 if (!pci_pipe->buf_sz)
1522 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001523
Michal Kazior099ac7c2014-10-28 10:32:05 +01001524 for (i = 0; i < ce_ring->nentries; i++) {
1525 skb = ce_ring->per_transfer_context[i];
1526 if (!skb)
1527 continue;
1528
1529 ce_ring->per_transfer_context[i] = NULL;
1530
Michal Kazior8582bf32015-01-24 12:14:47 +02001531 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
Michal Kazior099ac7c2014-10-28 10:32:05 +01001532 skb->len + skb_tailroom(skb),
Kalle Valo5e3dd152013-06-12 20:52:10 +03001533 DMA_FROM_DEVICE);
Michal Kazior099ac7c2014-10-28 10:32:05 +01001534 dev_kfree_skb_any(skb);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001535 }
1536}
1537
Michal Kazior099ac7c2014-10-28 10:32:05 +01001538static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001539{
1540 struct ath10k *ar;
1541 struct ath10k_pci *ar_pci;
Michal Kazior099ac7c2014-10-28 10:32:05 +01001542 struct ath10k_ce_pipe *ce_pipe;
1543 struct ath10k_ce_ring *ce_ring;
1544 struct ce_desc *ce_desc;
1545 struct sk_buff *skb;
Michal Kazior099ac7c2014-10-28 10:32:05 +01001546 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001547
Michal Kazior099ac7c2014-10-28 10:32:05 +01001548 ar = pci_pipe->hif_ce_state;
1549 ar_pci = ath10k_pci_priv(ar);
1550 ce_pipe = pci_pipe->ce_hdl;
1551 ce_ring = ce_pipe->src_ring;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001552
Michal Kazior099ac7c2014-10-28 10:32:05 +01001553 if (!ce_ring)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001554 return;
1555
Michal Kazior099ac7c2014-10-28 10:32:05 +01001556 if (!pci_pipe->buf_sz)
1557 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001558
Michal Kazior099ac7c2014-10-28 10:32:05 +01001559 ce_desc = ce_ring->shadow_base;
1560 if (WARN_ON(!ce_desc))
1561 return;
1562
1563 for (i = 0; i < ce_ring->nentries; i++) {
1564 skb = ce_ring->per_transfer_context[i];
1565 if (!skb)
Michal Kazior2415fc12013-11-08 08:01:32 +01001566 continue;
Michal Kazior2415fc12013-11-08 08:01:32 +01001567
Michal Kazior099ac7c2014-10-28 10:32:05 +01001568 ce_ring->per_transfer_context[i] = NULL;
Michal Kazior099ac7c2014-10-28 10:32:05 +01001569
Michal Kaziord84a5122014-11-27 11:09:37 +01001570 ar_pci->msg_callbacks_current.tx_completion(ar, skb);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001571 }
1572}
1573
1574/*
1575 * Cleanup residual buffers for device shutdown:
1576 * buffers that were enqueued for receive
1577 * buffers that were to be sent
1578 * Note: Buffers that had completed but which were
1579 * not yet processed are on a completion queue. They
1580 * are handled when the completion thread shuts down.
1581 */
1582static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1583{
1584 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1585 int pipe_num;
1586
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001587 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Michal Kazior87263e52013-08-27 13:08:01 +02001588 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001589
1590 pipe_info = &ar_pci->pipe_info[pipe_num];
1591 ath10k_pci_rx_pipe_cleanup(pipe_info);
1592 ath10k_pci_tx_pipe_cleanup(pipe_info);
1593 }
1594}
1595
1596static void ath10k_pci_ce_deinit(struct ath10k *ar)
1597{
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001598 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001599
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001600 for (i = 0; i < CE_COUNT; i++)
1601 ath10k_ce_deinit_pipe(ar, i);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001602}
1603
Michal Kazior728f95e2014-08-22 14:33:14 +02001604static void ath10k_pci_flush(struct ath10k *ar)
1605{
1606 ath10k_pci_kill_tasklet(ar);
1607 ath10k_pci_buffer_cleanup(ar);
1608}
1609
Kalle Valo5e3dd152013-06-12 20:52:10 +03001610static void ath10k_pci_hif_stop(struct ath10k *ar)
1611{
Michal Kazior77258d42015-05-18 09:38:18 +00001612 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1613 unsigned long flags;
1614
Michal Kazior7aa7a722014-08-25 12:09:38 +02001615 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
Michal Kazior32270b62013-08-02 09:15:47 +02001616
Michal Kazior10d23db2014-08-22 14:33:15 +02001617 /* Most likely the device has HTT Rx ring configured. The only way to
1618	 * prevent the device from accessing (and possibly corrupting) host
1619 * memory is to reset the chip now.
Michal Kaziore75db4e2014-08-28 22:14:16 +03001620 *
1621 * There's also no known way of masking MSI interrupts on the device.
1622	 * For ranged MSI the CE-related interrupts can be masked. However,
1623	 * regardless of how many MSI interrupts are assigned, the first one
1624	 * is always used for firmware indications (crashes) and cannot be
1625	 * masked. To prevent the device from asserting the interrupt, reset it
1626 * before proceeding with cleanup.
Michal Kazior10d23db2014-08-22 14:33:15 +02001627 */
Vasanthakumar Thiagarajan6e4202c2015-06-18 12:31:06 +05301628 ath10k_pci_safe_chip_reset(ar);
Michal Kaziore75db4e2014-08-28 22:14:16 +03001629
1630 ath10k_pci_irq_disable(ar);
Michal Kazior7c0f0e32014-10-20 14:14:38 +02001631 ath10k_pci_irq_sync(ar);
Michal Kaziore75db4e2014-08-28 22:14:16 +03001632 ath10k_pci_flush(ar);
Michal Kazior77258d42015-05-18 09:38:18 +00001633
1634 spin_lock_irqsave(&ar_pci->ps_lock, flags);
1635 WARN_ON(ar_pci->ps_wake_refcount > 0);
1636 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001637}
1638
1639static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1640 void *req, u32 req_len,
1641 void *resp, u32 *resp_len)
1642{
1643 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior2aa39112013-08-27 13:08:02 +02001644 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1645 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1646 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1647 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001648 dma_addr_t req_paddr = 0;
1649 dma_addr_t resp_paddr = 0;
1650 struct bmi_xfer xfer = {};
1651 void *treq, *tresp = NULL;
1652 int ret = 0;
1653
Michal Kazior85622cd2013-11-25 14:06:22 +01001654 might_sleep();
1655
Kalle Valo5e3dd152013-06-12 20:52:10 +03001656 if (resp && !resp_len)
1657 return -EINVAL;
1658
1659 if (resp && resp_len && *resp_len == 0)
1660 return -EINVAL;
1661
1662 treq = kmemdup(req, req_len, GFP_KERNEL);
1663 if (!treq)
1664 return -ENOMEM;
1665
1666 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1667 ret = dma_mapping_error(ar->dev, req_paddr);
1668 if (ret)
1669 goto err_dma;
1670
1671 if (resp && resp_len) {
1672 tresp = kzalloc(*resp_len, GFP_KERNEL);
1673 if (!tresp) {
1674 ret = -ENOMEM;
1675 goto err_req;
1676 }
1677
1678 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1679 DMA_FROM_DEVICE);
1680 ret = dma_mapping_error(ar->dev, resp_paddr);
1681 if (ret)
1682 goto err_req;
1683
1684 xfer.wait_for_resp = true;
1685 xfer.resp_len = 0;
1686
Michal Kazior728f95e2014-08-22 14:33:14 +02001687 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001688 }
1689
Kalle Valo5e3dd152013-06-12 20:52:10 +03001690 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1691 if (ret)
1692 goto err_resp;
1693
Michal Kazior85622cd2013-11-25 14:06:22 +01001694 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1695 if (ret) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001696 u32 unused_buffer;
1697 unsigned int unused_nbytes;
1698 unsigned int unused_id;
1699
Kalle Valo5e3dd152013-06-12 20:52:10 +03001700 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1701 &unused_nbytes, &unused_id);
1702 } else {
1703		/* a zero return value means the transfer did not time out */
1704 ret = 0;
1705 }
1706
1707err_resp:
1708 if (resp) {
1709 u32 unused_buffer;
1710
1711 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1712 dma_unmap_single(ar->dev, resp_paddr,
1713 *resp_len, DMA_FROM_DEVICE);
1714 }
1715err_req:
1716 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1717
1718 if (ret == 0 && resp_len) {
1719 *resp_len = min(*resp_len, xfer.resp_len);
1720 memcpy(resp, tresp, xfer.resp_len);
1721 }
1722err_dma:
1723 kfree(treq);
1724 kfree(tresp);
1725
1726 return ret;
1727}
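
/* Illustrative usage sketch (not taken from this file; the cmd/resp
 * buffers and their types are hypothetical). A BMI caller built on the
 * HIF op above might look like:
 *
 *	u32 resp_len = sizeof(resp);
 *	int ret;
 *
 *	ret = ath10k_pci_hif_exchange_bmi_msg(ar, &cmd, sizeof(cmd),
 *					      &resp, &resp_len);
 *	if (ret)
 *		return ret;
 *
 * On success resp_len holds the actual response length, clamped to the
 * caller-supplied buffer size.
 */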
1728
Michal Kazior5440ce22013-09-03 15:09:58 +02001729static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001730{
Michal Kazior5440ce22013-09-03 15:09:58 +02001731 struct bmi_xfer *xfer;
1732 u32 ce_data;
1733 unsigned int nbytes;
1734 unsigned int transfer_id;
1735
1736 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1737 &nbytes, &transfer_id))
1738 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001739
Michal Kazior2374b182014-07-14 16:25:25 +03001740 xfer->tx_done = true;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001741}
1742
Michal Kazior5440ce22013-09-03 15:09:58 +02001743static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001744{
Michal Kazior7aa7a722014-08-25 12:09:38 +02001745 struct ath10k *ar = ce_state->ar;
Michal Kazior5440ce22013-09-03 15:09:58 +02001746 struct bmi_xfer *xfer;
1747 u32 ce_data;
1748 unsigned int nbytes;
1749 unsigned int transfer_id;
1750 unsigned int flags;
1751
1752 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1753 &nbytes, &transfer_id, &flags))
1754 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001755
Michal Kazior04ed9df2014-10-28 10:34:36 +01001756 if (WARN_ON_ONCE(!xfer))
1757 return;
1758
Kalle Valo5e3dd152013-06-12 20:52:10 +03001759 if (!xfer->wait_for_resp) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001760 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001761 return;
1762 }
1763
1764 xfer->resp_len = nbytes;
Michal Kazior2374b182014-07-14 16:25:25 +03001765 xfer->rx_done = true;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001766}
1767
Michal Kazior85622cd2013-11-25 14:06:22 +01001768static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1769 struct ath10k_ce_pipe *rx_pipe,
1770 struct bmi_xfer *xfer)
1771{
1772 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1773
1774 while (time_before_eq(jiffies, timeout)) {
1775 ath10k_pci_bmi_send_done(tx_pipe);
1776 ath10k_pci_bmi_recv_data(rx_pipe);
1777
Michal Kazior2374b182014-07-14 16:25:25 +03001778 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
Michal Kazior85622cd2013-11-25 14:06:22 +01001779 return 0;
1780
1781 schedule();
1782 }
1783
1784 return -ETIMEDOUT;
1785}
1786
Kalle Valo5e3dd152013-06-12 20:52:10 +03001787/*
Kalle Valo5e3dd152013-06-12 20:52:10 +03001788 * Send an interrupt to the device to wake up the Target CPU
1789 * so it has an opportunity to notice any changed state.
1790 */
1791static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1792{
Michal Kazior9e264942014-09-02 11:00:21 +03001793 u32 addr, val;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001794
Michal Kazior9e264942014-09-02 11:00:21 +03001795 addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1796 val = ath10k_pci_read32(ar, addr);
1797 val |= CORE_CTRL_CPU_INTR_MASK;
1798 ath10k_pci_write32(ar, addr, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001799
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001800 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001801}
1802
Michal Kaziord63955b2015-01-24 12:14:49 +02001803static int ath10k_pci_get_num_banks(struct ath10k *ar)
1804{
1805 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1806
1807 switch (ar_pci->pdev->device) {
1808 case QCA988X_2_0_DEVICE_ID:
Vasanthakumar Thiagarajan8bd47022015-06-18 12:31:03 +05301809 case QCA99X0_2_0_DEVICE_ID:
Michal Kaziord63955b2015-01-24 12:14:49 +02001810 return 1;
1811 case QCA6174_2_1_DEVICE_ID:
1812 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1813 case QCA6174_HW_1_0_CHIP_ID_REV:
1814 case QCA6174_HW_1_1_CHIP_ID_REV:
Michal Kazior11a002e2015-04-20 09:20:41 +00001815 case QCA6174_HW_2_1_CHIP_ID_REV:
1816 case QCA6174_HW_2_2_CHIP_ID_REV:
Michal Kaziord63955b2015-01-24 12:14:49 +02001817 return 3;
1818 case QCA6174_HW_1_3_CHIP_ID_REV:
1819 return 2;
Michal Kaziord63955b2015-01-24 12:14:49 +02001820 case QCA6174_HW_3_0_CHIP_ID_REV:
1821 case QCA6174_HW_3_1_CHIP_ID_REV:
1822 case QCA6174_HW_3_2_CHIP_ID_REV:
1823 return 9;
1824 }
1825 break;
1826 }
1827
1828 ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1829 return 1;
1830}
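
/* For example (illustrative): a QCA6174 hw3.x device returns 9 above, and
 * ath10k_pci_init_config() below folds that count into the hi_early_alloc
 * host interest word via HI_EARLY_ALLOC_IRAM_BANKS_SHIFT.
 */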
1831
Kalle Valo5e3dd152013-06-12 20:52:10 +03001832static int ath10k_pci_init_config(struct ath10k *ar)
1833{
1834 u32 interconnect_targ_addr;
1835 u32 pcie_state_targ_addr = 0;
1836 u32 pipe_cfg_targ_addr = 0;
1837 u32 svc_to_pipe_map = 0;
1838 u32 pcie_config_flags = 0;
1839 u32 ealloc_value;
1840 u32 ealloc_targ_addr;
1841 u32 flag2_value;
1842 u32 flag2_targ_addr;
1843 int ret = 0;
1844
1845 /* Download to Target the CE Config and the service-to-CE map */
1846 interconnect_targ_addr =
1847 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1848
1849 /* Supply Target-side CE configuration */
Michal Kazior9e264942014-09-02 11:00:21 +03001850 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
1851 &pcie_state_targ_addr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001852 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001853 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001854 return ret;
1855 }
1856
1857 if (pcie_state_targ_addr == 0) {
1858 ret = -EIO;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001859 ath10k_err(ar, "Invalid pcie state addr\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001860 return ret;
1861 }
1862
Michal Kazior9e264942014-09-02 11:00:21 +03001863 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
Kalle Valo5e3dd152013-06-12 20:52:10 +03001864 offsetof(struct pcie_state,
Michal Kazior9e264942014-09-02 11:00:21 +03001865 pipe_cfg_addr)),
1866 &pipe_cfg_targ_addr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001867 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001868 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001869 return ret;
1870 }
1871
1872 if (pipe_cfg_targ_addr == 0) {
1873 ret = -EIO;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001874 ath10k_err(ar, "Invalid pipe cfg addr\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001875 return ret;
1876 }
1877
1878 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
Kalle Valo5b07e072014-09-14 12:50:06 +03001879 target_ce_config_wlan,
Vasanthakumar Thiagarajan050af062015-06-18 12:31:04 +05301880 sizeof(struct ce_pipe_config) *
1881 NUM_TARGET_CE_CONFIG_WLAN);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001882
1883 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001884 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001885 return ret;
1886 }
1887
Michal Kazior9e264942014-09-02 11:00:21 +03001888 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
Kalle Valo5e3dd152013-06-12 20:52:10 +03001889 offsetof(struct pcie_state,
Michal Kazior9e264942014-09-02 11:00:21 +03001890 svc_to_pipe_map)),
1891 &svc_to_pipe_map);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001892 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001893 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001894 return ret;
1895 }
1896
1897 if (svc_to_pipe_map == 0) {
1898 ret = -EIO;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001899 ath10k_err(ar, "Invalid svc_to_pipe map\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001900 return ret;
1901 }
1902
1903 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
Kalle Valo5b07e072014-09-14 12:50:06 +03001904 target_service_to_ce_map_wlan,
1905 sizeof(target_service_to_ce_map_wlan));
Kalle Valo5e3dd152013-06-12 20:52:10 +03001906 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001907 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001908 return ret;
1909 }
1910
Michal Kazior9e264942014-09-02 11:00:21 +03001911 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
Kalle Valo5e3dd152013-06-12 20:52:10 +03001912 offsetof(struct pcie_state,
Michal Kazior9e264942014-09-02 11:00:21 +03001913 config_flags)),
1914 &pcie_config_flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001915 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001916 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001917 return ret;
1918 }
1919
1920 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1921
Michal Kazior9e264942014-09-02 11:00:21 +03001922 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
1923 offsetof(struct pcie_state,
1924 config_flags)),
1925 pcie_config_flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001926 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001927 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001928 return ret;
1929 }
1930
1931 /* configure early allocation */
1932 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1933
Michal Kazior9e264942014-09-02 11:00:21 +03001934 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001935 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001936		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001937 return ret;
1938 }
1939
1940 /* first bank is switched to IRAM */
1941 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1942 HI_EARLY_ALLOC_MAGIC_MASK);
Michal Kaziord63955b2015-01-24 12:14:49 +02001943 ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
1944 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03001945 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1946
Michal Kazior9e264942014-09-02 11:00:21 +03001947 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001948 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001949 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001950 return ret;
1951 }
1952
1953 /* Tell Target to proceed with initialization */
1954 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1955
Michal Kazior9e264942014-09-02 11:00:21 +03001956 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001957 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001958 ath10k_err(ar, "Failed to get option val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001959 return ret;
1960 }
1961
1962 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1963
Michal Kazior9e264942014-09-02 11:00:21 +03001964 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001965 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001966 ath10k_err(ar, "Failed to set option val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001967 return ret;
1968 }
1969
1970 return 0;
1971}
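
/* The function above repeats one pattern several times: diag-read a target
 * word, modify it, diag-write it back. A minimal sketch of that pattern as
 * a helper (hypothetical, not part of this driver):
 *
 *	static int ath10k_pci_diag_rmw32(struct ath10k *ar, u32 addr,
 *					 u32 clear, u32 set)
 *	{
 *		u32 val;
 *		int ret;
 *
 *		ret = ath10k_pci_diag_read32(ar, addr, &val);
 *		if (ret)
 *			return ret;
 *
 *		val &= ~clear;
 *		val |= set;
 *
 *		return ath10k_pci_diag_write32(ar, addr, val);
 *	}
 */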
1972
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001973static int ath10k_pci_alloc_pipes(struct ath10k *ar)
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001974{
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001975 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1976 struct ath10k_pci_pipe *pipe;
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001977 int i, ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001978
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001979 for (i = 0; i < CE_COUNT; i++) {
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001980 pipe = &ar_pci->pipe_info[i];
1981 pipe->ce_hdl = &ar_pci->ce_states[i];
1982 pipe->pipe_num = i;
1983 pipe->hif_ce_state = ar;
1984
1985 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
1986 ath10k_pci_ce_send_done,
1987 ath10k_pci_ce_recv_data);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001988 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001989 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001990 i, ret);
1991 return ret;
1992 }
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001993
1994 /* Last CE is Diagnostic Window */
Vasanthakumar Thiagarajan050af062015-06-18 12:31:04 +05301995 if (i == CE_DIAG_PIPE) {
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001996 ar_pci->ce_diag = pipe->ce_hdl;
1997 continue;
1998 }
1999
2000 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02002001 }
2002
2003 return 0;
2004}
2005
Michal Kazior84cbf3a2014-10-20 14:14:39 +02002006static void ath10k_pci_free_pipes(struct ath10k *ar)
Michal Kazior25d0dbc2014-03-28 10:02:38 +02002007{
2008 int i;
2009
2010 for (i = 0; i < CE_COUNT; i++)
2011 ath10k_ce_free_pipe(ar, i);
2012}
Kalle Valo5e3dd152013-06-12 20:52:10 +03002013
Michal Kazior84cbf3a2014-10-20 14:14:39 +02002014static int ath10k_pci_init_pipes(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002015{
Michal Kazior84cbf3a2014-10-20 14:14:39 +02002016 int i, ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002017
Michal Kazior84cbf3a2014-10-20 14:14:39 +02002018 for (i = 0; i < CE_COUNT; i++) {
2019 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02002020 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002021 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
Michal Kazior84cbf3a2014-10-20 14:14:39 +02002022 i, ret);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02002023 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002024 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002025 }
2026
Kalle Valo5e3dd152013-06-12 20:52:10 +03002027 return 0;
2028}
2029
Michal Kazior5c771e72014-08-22 14:23:34 +02002030static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002031{
Michal Kazior5c771e72014-08-22 14:23:34 +02002032 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2033 FW_IND_EVENT_PENDING;
2034}
Kalle Valo5e3dd152013-06-12 20:52:10 +03002035
Michal Kazior5c771e72014-08-22 14:23:34 +02002036static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2037{
2038 u32 val;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002039
Michal Kazior5c771e72014-08-22 14:23:34 +02002040 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2041 val &= ~FW_IND_EVENT_PENDING;
2042 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002043}
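
/* The two helpers above are used together in a fixed sequence whenever a
 * firmware crash indication is seen, e.g. in ath10k_pci_tasklet() below:
 *
 *	if (ath10k_pci_has_fw_crashed(ar)) {
 *		ath10k_pci_irq_disable(ar);
 *		ath10k_pci_fw_crashed_clear(ar);
 *		ath10k_pci_fw_crashed_dump(ar);
 *	}
 */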
2044
Michal Kaziorde013572014-05-14 16:56:16 +03002045/* this function effectively clears target memory controller assert line */
2046static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2047{
2048 u32 val;
2049
2050 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2051 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2052 val | SOC_RESET_CONTROL_SI0_RST_MASK);
2053 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2054
2055 msleep(10);
2056
2057 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2058 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2059 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2060 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2061
2062 msleep(10);
2063}
2064
Michal Kazior61c16482014-10-28 10:32:06 +01002065static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002066{
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002067 u32 val;
2068
Kalle Valob39712c2014-03-28 09:32:46 +02002069 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002070
Michal Kazior61c16482014-10-28 10:32:06 +01002071 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2072 SOC_RESET_CONTROL_ADDRESS);
2073 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2074 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2075}
2076
2077static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2078{
2079 u32 val;
2080
2081 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2082 SOC_RESET_CONTROL_ADDRESS);
2083
2084 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2085 val | SOC_RESET_CONTROL_CE_RST_MASK);
2086 msleep(10);
2087 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2088 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2089}
2090
2091static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2092{
2093 u32 val;
2094
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002095 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2096 SOC_LF_TIMER_CONTROL0_ADDRESS);
2097 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2098 SOC_LF_TIMER_CONTROL0_ADDRESS,
2099 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
Michal Kazior61c16482014-10-28 10:32:06 +01002100}
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002101
Michal Kazior61c16482014-10-28 10:32:06 +01002102static int ath10k_pci_warm_reset(struct ath10k *ar)
2103{
2104 int ret;
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002105
Michal Kazior61c16482014-10-28 10:32:06 +01002106 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002107
Michal Kazior61c16482014-10-28 10:32:06 +01002108 spin_lock_bh(&ar->data_lock);
2109 ar->stats.fw_warm_reset_counter++;
2110 spin_unlock_bh(&ar->data_lock);
2111
2112 ath10k_pci_irq_disable(ar);
2113
2114 /* Make sure the target CPU is not doing anything dangerous, e.g. if it
2115	 * were to access the copy engine while the host performs a copy
2116	 * engine reset, the device could confuse the PCIe controller to the
2117	 * point of bringing the host system to a complete stop (i.e. hang).
2118 */
Michal Kaziorde013572014-05-14 16:56:16 +03002119 ath10k_pci_warm_reset_si0(ar);
Michal Kazior61c16482014-10-28 10:32:06 +01002120 ath10k_pci_warm_reset_cpu(ar);
2121 ath10k_pci_init_pipes(ar);
2122 ath10k_pci_wait_for_target_init(ar);
Michal Kaziorde013572014-05-14 16:56:16 +03002123
Michal Kazior61c16482014-10-28 10:32:06 +01002124 ath10k_pci_warm_reset_clear_lf(ar);
2125 ath10k_pci_warm_reset_ce(ar);
2126 ath10k_pci_warm_reset_cpu(ar);
2127 ath10k_pci_init_pipes(ar);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002128
Michal Kazior61c16482014-10-28 10:32:06 +01002129 ret = ath10k_pci_wait_for_target_init(ar);
2130 if (ret) {
2131 ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2132 return ret;
2133 }
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002134
Michal Kazior7aa7a722014-08-25 12:09:38 +02002135 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002136
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002137 return 0;
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002138}
2139
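/* "Safe" here means a reset that may be issued while the firmware state is
 * unknown without risking a host lockup: warm reset for QCA988X/QCA6174
 * (see the FIXME about cold reset in ath10k_pci_qca988x_chip_reset()
 * below) and a full chip reset for QCA99X0.
 */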
Vasanthakumar Thiagarajan6e4202c2015-06-18 12:31:06 +05302140static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2141{
2142 if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
2143 return ath10k_pci_warm_reset(ar);
2144 } else if (QCA_REV_99X0(ar)) {
2145 ath10k_pci_irq_disable(ar);
2146 return ath10k_pci_qca99x0_chip_reset(ar);
2147 } else {
2148 return -ENOTSUPP;
2149 }
2150}
2151
Michal Kaziord63955b2015-01-24 12:14:49 +02002152static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
Michal Kazior0bc14d02014-10-28 10:32:07 +01002153{
2154 int i, ret;
2155 u32 val;
2156
Michal Kaziord63955b2015-01-24 12:14:49 +02002157 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
Michal Kazior0bc14d02014-10-28 10:32:07 +01002158
2159	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
2160	 * It is thus preferred to use warm reset, which is safer but may not be
2161	 * able to recover the device from all possible failure scenarios.
2162 *
2163 * Warm reset doesn't always work on first try so attempt it a few
2164 * times before giving up.
2165 */
2166 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2167 ret = ath10k_pci_warm_reset(ar);
2168 if (ret) {
2169			ath10k_warn(ar, "warm reset attempt %d of %d failed: %d\n",
2170 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2171 ret);
2172 continue;
2173 }
2174
2175 /* FIXME: Sometimes copy engine doesn't recover after warm
2176 * reset. In most cases this needs cold reset. In some of these
2177 * cases the device is in such a state that a cold reset may
2178 * lock up the host.
2179 *
2180	 * Reading any host interest register via the copy engine is
2181	 * sufficient to verify whether the device is capable of booting
2182	 * the firmware blob.
2183 */
2184 ret = ath10k_pci_init_pipes(ar);
2185 if (ret) {
2186 ath10k_warn(ar, "failed to init copy engine: %d\n",
2187 ret);
2188 continue;
2189 }
2190
2191 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2192 &val);
2193 if (ret) {
2194 ath10k_warn(ar, "failed to poke copy engine: %d\n",
2195 ret);
2196 continue;
2197 }
2198
2199 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2200 return 0;
2201 }
2202
2203 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2204 ath10k_warn(ar, "refusing cold reset as requested\n");
2205 return -EPERM;
2206 }
2207
2208 ret = ath10k_pci_cold_reset(ar);
2209 if (ret) {
2210 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2211 return ret;
2212 }
2213
2214 ret = ath10k_pci_wait_for_target_init(ar);
2215 if (ret) {
2216 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2217 ret);
2218 return ret;
2219 }
2220
Michal Kaziord63955b2015-01-24 12:14:49 +02002221 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
Michal Kazior0bc14d02014-10-28 10:32:07 +01002222
2223 return 0;
2224}
2225
Michal Kaziord63955b2015-01-24 12:14:49 +02002226static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2227{
2228 int ret;
2229
2230 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2231
2232 /* FIXME: QCA6174 requires cold + warm reset to work. */
2233
2234 ret = ath10k_pci_cold_reset(ar);
2235 if (ret) {
2236 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2237 return ret;
2238 }
2239
2240 ret = ath10k_pci_wait_for_target_init(ar);
2241 if (ret) {
2242 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2243 ret);
2244 return ret;
2245 }
2246
2247 ret = ath10k_pci_warm_reset(ar);
2248 if (ret) {
2249 ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2250 return ret;
2251 }
2252
2253 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2254
2255 return 0;
2256}
2257
Vasanthakumar Thiagarajan6e4202c2015-06-18 12:31:06 +05302258static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2259{
2260 int ret;
2261
2262 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2263
2264 ret = ath10k_pci_cold_reset(ar);
2265 if (ret) {
2266 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2267 return ret;
2268 }
2269
2270 ret = ath10k_pci_wait_for_target_init(ar);
2271 if (ret) {
2272 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2273 ret);
2274 return ret;
2275 }
2276
2277 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2278
2279 return 0;
2280}
2281
Michal Kaziord63955b2015-01-24 12:14:49 +02002282static int ath10k_pci_chip_reset(struct ath10k *ar)
2283{
2284 if (QCA_REV_988X(ar))
2285 return ath10k_pci_qca988x_chip_reset(ar);
2286 else if (QCA_REV_6174(ar))
2287 return ath10k_pci_qca6174_chip_reset(ar);
Vasanthakumar Thiagarajan6e4202c2015-06-18 12:31:06 +05302288 else if (QCA_REV_99X0(ar))
2289 return ath10k_pci_qca99x0_chip_reset(ar);
Michal Kaziord63955b2015-01-24 12:14:49 +02002290 else
2291 return -ENOTSUPP;
2292}
2293
Michal Kazior0bc14d02014-10-28 10:32:07 +01002294static int ath10k_pci_hif_power_up(struct ath10k *ar)
Michal Kazior8c5c5362013-07-16 09:38:50 +02002295{
Janusz Dziedzic76d870e2015-05-18 09:38:16 +00002296 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002297 int ret;
2298
Michal Kazior0bc14d02014-10-28 10:32:07 +01002299 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2300
Janusz Dziedzic76d870e2015-05-18 09:38:16 +00002301 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2302 &ar_pci->link_ctl);
2303 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2304 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2305
Michal Kazior8c5c5362013-07-16 09:38:50 +02002306 /*
2307 * Bring the target up cleanly.
2308 *
2309 * The target may be in an undefined state with an AUX-powered Target
2310 * and a Host in WoW mode. If the Host crashes, loses power, or is
2311 * restarted (without unloading the driver) then the Target is left
2312 * (aux) powered and running. On a subsequent driver load, the Target
2313 * is in an unexpected state. We try to catch that here in order to
2314 * reset the Target and retry the probe.
2315 */
Michal Kazior0bc14d02014-10-28 10:32:07 +01002316 ret = ath10k_pci_chip_reset(ar);
Michal Kazior5b2589f2013-11-08 08:01:30 +01002317 if (ret) {
Michal Kaziora2fa8802015-01-12 15:29:37 +01002318 if (ath10k_pci_has_fw_crashed(ar)) {
2319 ath10k_warn(ar, "firmware crashed during chip reset\n");
2320 ath10k_pci_fw_crashed_clear(ar);
2321 ath10k_pci_fw_crashed_dump(ar);
2322 }
2323
Michal Kazior0bc14d02014-10-28 10:32:07 +01002324 ath10k_err(ar, "failed to reset chip: %d\n", ret);
Bartosz Markowski707b1bbd2014-10-31 09:03:43 +01002325 goto err_sleep;
Michal Kazior5b2589f2013-11-08 08:01:30 +01002326 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02002327
Michal Kazior84cbf3a2014-10-20 14:14:39 +02002328 ret = ath10k_pci_init_pipes(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002329 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002330 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
Bartosz Markowski707b1bbd2014-10-31 09:03:43 +01002331 goto err_sleep;
Michal Kaziorab977bd2013-11-25 14:06:26 +01002332 }
2333
Michal Kazior98563d52013-11-08 08:01:33 +01002334 ret = ath10k_pci_init_config(ar);
2335 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002336 ath10k_err(ar, "failed to setup init config: %d\n", ret);
Michal Kazior5c771e72014-08-22 14:23:34 +02002337 goto err_ce;
Michal Kazior98563d52013-11-08 08:01:33 +01002338 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02002339
2340 ret = ath10k_pci_wake_target_cpu(ar);
2341 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002342 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
Michal Kazior5c771e72014-08-22 14:23:34 +02002343 goto err_ce;
Michal Kazior8c5c5362013-07-16 09:38:50 +02002344 }
2345
2346 return 0;
2347
2348err_ce:
2349 ath10k_pci_ce_deinit(ar);
Michal Kazior0bc14d02014-10-28 10:32:07 +01002350
Bartosz Markowski707b1bbd2014-10-31 09:03:43 +01002351err_sleep:
Michal Kazior8c5c5362013-07-16 09:38:50 +02002352 return ret;
2353}
2354
2355static void ath10k_pci_hif_power_down(struct ath10k *ar)
2356{
Michal Kazior7aa7a722014-08-25 12:09:38 +02002357 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002358
Michal Kaziorc011b282014-10-28 10:32:08 +01002359	/* Currently hif_power_up effectively performs a reset, and hif_stop
2360	 * resets the chip as well, so there's no point in resetting here.
2361 */
Michal Kazior8c5c5362013-07-16 09:38:50 +02002362}
2363
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002364#ifdef CONFIG_PM
2365
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002366static int ath10k_pci_hif_suspend(struct ath10k *ar)
2367{
Michal Kazior77258d42015-05-18 09:38:18 +00002368	/* The grace timer can still be counting down and ar->ps_awake may be true.
2369 * It is known that the device may be asleep after resuming regardless
2370 * of the SoC powersave state before suspending. Hence make sure the
2371 * device is asleep before proceeding.
2372 */
2373 ath10k_pci_sleep_sync(ar);
Michal Kazior320e14b2015-03-02 13:22:13 +01002374
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002375 return 0;
2376}
2377
2378static int ath10k_pci_hif_resume(struct ath10k *ar)
2379{
2380 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2381 struct pci_dev *pdev = ar_pci->pdev;
2382 u32 val;
2383
Michal Kazior9ff4be92015-03-02 13:22:14 +01002384 /* Suspend/Resume resets the PCI configuration space, so we have to
2385 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2386 * from interfering with C3 CPU state. pci_restore_state won't help
2387	 * here since it only restores the first 64 bytes of the PCI config header.
2388 */
2389 pci_read_config_dword(pdev, 0x40, &val);
2390 if ((val & 0x0000ff00) != 0)
2391 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
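	/* Illustrative values: a read of 0x00008030 would be written back
	 * as 0x00000030, clearing the RETRY_TIMEOUT field in bits 15:8.
	 */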
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002392
Michal Kazior77258d42015-05-18 09:38:18 +00002393 return 0;
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002394}
2395#endif
2396
Kalle Valo5e3dd152013-06-12 20:52:10 +03002397static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
Michal Kazior726346f2014-02-27 18:50:04 +02002398 .tx_sg = ath10k_pci_hif_tx_sg,
Kalle Valoeef25402014-09-24 14:16:52 +03002399 .diag_read = ath10k_pci_hif_diag_read,
Yanbo Li9f65ad22014-11-25 12:24:48 +02002400 .diag_write = ath10k_pci_diag_write_mem,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002401 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2402 .start = ath10k_pci_hif_start,
2403 .stop = ath10k_pci_hif_stop,
2404 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2405 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2406 .send_complete_check = ath10k_pci_hif_send_complete_check,
Michal Kaziore799bbf2013-07-05 16:15:12 +03002407 .set_callbacks = ath10k_pci_hif_set_callbacks,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002408 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
Michal Kazior8c5c5362013-07-16 09:38:50 +02002409 .power_up = ath10k_pci_hif_power_up,
2410 .power_down = ath10k_pci_hif_power_down,
Yanbo Li077a3802014-11-25 12:24:33 +02002411 .read32 = ath10k_pci_read32,
2412 .write32 = ath10k_pci_write32,
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002413#ifdef CONFIG_PM
2414 .suspend = ath10k_pci_hif_suspend,
2415 .resume = ath10k_pci_hif_resume,
2416#endif
Kalle Valo5e3dd152013-06-12 20:52:10 +03002417};
2418
2419static void ath10k_pci_ce_tasklet(unsigned long ptr)
2420{
Michal Kazior87263e52013-08-27 13:08:01 +02002421 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002422 struct ath10k_pci *ar_pci = pipe->ar_pci;
2423
2424 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2425}
2426
2427static void ath10k_msi_err_tasklet(unsigned long data)
2428{
2429 struct ath10k *ar = (struct ath10k *)data;
2430
Michal Kazior5c771e72014-08-22 14:23:34 +02002431 if (!ath10k_pci_has_fw_crashed(ar)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002432 ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
Michal Kazior5c771e72014-08-22 14:23:34 +02002433 return;
2434 }
2435
Michal Kazior6f3b7ff2015-01-24 12:14:52 +02002436 ath10k_pci_irq_disable(ar);
Michal Kazior5c771e72014-08-22 14:23:34 +02002437 ath10k_pci_fw_crashed_clear(ar);
2438 ath10k_pci_fw_crashed_dump(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002439}
2440
2441/*
2442 * Handler for a per-engine interrupt on a PARTICULAR CE.
2443 * This is used in cases where each CE has a private MSI interrupt.
2444 */
2445static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2446{
2447 struct ath10k *ar = arg;
2448 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2449 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2450
Dan Carpentere5742672013-06-18 10:28:46 +03002451 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002452 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
2453 ce_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002454 return IRQ_HANDLED;
2455 }
2456
2457 /*
2458 * NOTE: We are able to derive ce_id from irq because we
2459	 * use a one-to-one mapping for CEs 0..5.
2460	 * CEs 6 & 7 do not use interrupts at all.
2461 *
2462 * This mapping must be kept in sync with the mapping
2463 * used by firmware.
2464 */
2465 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2466 return IRQ_HANDLED;
2467}
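
/* Worked example (illustrative numbers, assuming MSI_ASSIGN_CE_INITIAL
 * is 1): with a block of MSI vectors starting at pdev->irq == 40, vector
 * 43 yields ce_id = 43 - 40 - 1 = 2, so the tasklet of pipe 2 is run.
 */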
2468
2469static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2470{
2471 struct ath10k *ar = arg;
2472 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2473
2474 tasklet_schedule(&ar_pci->msi_fw_err);
2475 return IRQ_HANDLED;
2476}
2477
2478/*
2479 * Top-level interrupt handler for all PCI interrupts from a Target.
2480 * When a block of MSI interrupts is allocated, this top-level handler
2481 * is not used; instead, we directly call the correct sub-handler.
2482 */
2483static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2484{
2485 struct ath10k *ar = arg;
2486 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2487
2488 if (ar_pci->num_msi_intrs == 0) {
Michal Kaziore5398872013-11-25 14:06:20 +01002489 if (!ath10k_pci_irq_pending(ar))
2490 return IRQ_NONE;
2491
Michal Kazior26852182013-11-25 14:06:25 +01002492 ath10k_pci_disable_and_clear_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002493 }
2494
2495 tasklet_schedule(&ar_pci->intr_tq);
2496
2497 return IRQ_HANDLED;
2498}
2499
2500static void ath10k_pci_tasklet(unsigned long data)
2501{
2502 struct ath10k *ar = (struct ath10k *)data;
2503 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2504
Michal Kazior5c771e72014-08-22 14:23:34 +02002505 if (ath10k_pci_has_fw_crashed(ar)) {
Michal Kazior6f3b7ff2015-01-24 12:14:52 +02002506 ath10k_pci_irq_disable(ar);
Michal Kazior5c771e72014-08-22 14:23:34 +02002507 ath10k_pci_fw_crashed_clear(ar);
2508 ath10k_pci_fw_crashed_dump(ar);
2509 return;
2510 }
2511
Kalle Valo5e3dd152013-06-12 20:52:10 +03002512 ath10k_ce_per_engine_service_any(ar);
2513
Michal Kazior26852182013-11-25 14:06:25 +01002514 /* Re-enable legacy irq that was disabled in the irq handler */
2515 if (ar_pci->num_msi_intrs == 0)
2516 ath10k_pci_enable_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002517}
2518
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002519static int ath10k_pci_request_irq_msix(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002520{
2521 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002522 int ret, i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002523
2524 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2525 ath10k_pci_msi_fw_handler,
2526 IRQF_SHARED, "ath10k_pci", ar);
Michal Kazior591ecdb2013-07-31 10:55:15 +02002527 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002528 ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
Michal Kazior591ecdb2013-07-31 10:55:15 +02002529 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002530 return ret;
Michal Kazior591ecdb2013-07-31 10:55:15 +02002531 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002532
2533 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2534 ret = request_irq(ar_pci->pdev->irq + i,
2535 ath10k_pci_per_engine_handler,
2536 IRQF_SHARED, "ath10k_pci", ar);
2537 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002538 ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002539 ar_pci->pdev->irq + i, ret);
2540
Michal Kazior87b14232013-06-26 08:50:50 +02002541 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2542 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002543
Michal Kazior87b14232013-06-26 08:50:50 +02002544 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002545 return ret;
2546 }
2547 }
2548
Kalle Valo5e3dd152013-06-12 20:52:10 +03002549 return 0;
2550}
2551
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002552static int ath10k_pci_request_irq_msi(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002553{
2554 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2555 int ret;
2556
2557 ret = request_irq(ar_pci->pdev->irq,
2558 ath10k_pci_interrupt_handler,
2559 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002560 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002561 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002562 ar_pci->pdev->irq, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002563 return ret;
Kalle Valof3782742013-10-17 11:36:15 +03002564 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002565
Kalle Valo5e3dd152013-06-12 20:52:10 +03002566 return 0;
2567}
2568
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002569static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002570{
2571 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002572 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002573
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002574 ret = request_irq(ar_pci->pdev->irq,
2575 ath10k_pci_interrupt_handler,
2576 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002577 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002578 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002579 ar_pci->pdev->irq, ret);
Kalle Valof3782742013-10-17 11:36:15 +03002580 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002581 }
2582
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002583 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002584}
2585
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002586static int ath10k_pci_request_irq(struct ath10k *ar)
2587{
2588 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2589
2590 switch (ar_pci->num_msi_intrs) {
2591 case 0:
2592 return ath10k_pci_request_irq_legacy(ar);
2593 case 1:
2594 return ath10k_pci_request_irq_msi(ar);
2595 case MSI_NUM_REQUEST:
2596 return ath10k_pci_request_irq_msix(ar);
2597 }
2598
Michal Kazior7aa7a722014-08-25 12:09:38 +02002599 ath10k_warn(ar, "unknown irq configuration upon request\n");
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002600 return -EINVAL;
2601}
2602
2603static void ath10k_pci_free_irq(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002604{
2605 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2606 int i;
2607
2608	/* There's at least one interrupt regardless of whether it's legacy INTR,
2609	 * MSI or MSI-X */
2610 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2611 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002612}
2613
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002614static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2615{
2616 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2617 int i;
2618
2619 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2620 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2621 (unsigned long)ar);
2622
2623 for (i = 0; i < CE_COUNT; i++) {
2624 ar_pci->pipe_info[i].ar_pci = ar_pci;
2625 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2626 (unsigned long)&ar_pci->pipe_info[i]);
2627 }
2628}
2629
2630static int ath10k_pci_init_irq(struct ath10k *ar)
2631{
2632 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2633 int ret;
2634
2635 ath10k_pci_init_irq_tasklets(ar);
2636
Michal Kazior403d6272014-08-22 14:23:31 +02002637 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002638 ath10k_info(ar, "limiting irq mode to: %d\n",
2639 ath10k_pci_irq_mode);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002640
2641 /* Try MSI-X */
Michal Kazior0edf2572014-08-07 11:03:29 +02002642 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002643 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
Alexander Gordeev5ad68672014-02-13 17:50:02 +02002644 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
Kalle Valo5b07e072014-09-14 12:50:06 +03002645 ar_pci->num_msi_intrs);
Alexander Gordeev5ad68672014-02-13 17:50:02 +02002646 if (ret > 0)
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002647 return 0;
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002648
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002649 /* fall-through */
2650 }
2651
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002652 /* Try MSI */
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002653 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2654 ar_pci->num_msi_intrs = 1;
2655 ret = pci_enable_msi(ar_pci->pdev);
2656 if (ret == 0)
2657 return 0;
2658
2659 /* fall-through */
2660 }
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002661
2662 /* Try legacy irq
2663 *
2664	 * A potential race occurs here: the CORE_BASE write
2665	 * depends on the target correctly decoding the AXI address, but
2666	 * the host won't know when the target has written its BAR to CORE_CTRL.
2667	 * The write might get lost if the target has NOT yet written the BAR.
2668	 * For now, fix the race by repeating the write in the
2669	 * synchronization check below. */
2670 ar_pci->num_msi_intrs = 0;
2671
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002672 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2673 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002674
2675 return 0;
2676}
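
/* Note: the interrupt mode can be forced via the irq_mode module parameter
 * declared near the top of this file, e.g. (illustrative):
 *
 *	modprobe ath10k_pci irq_mode=1		(force legacy interrupts)
 */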
2677
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002678static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002679{
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002680 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2681 0);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002682}
2683
2684static int ath10k_pci_deinit_irq(struct ath10k *ar)
2685{
2686 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2687
2688 switch (ar_pci->num_msi_intrs) {
2689 case 0:
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002690 ath10k_pci_deinit_irq_legacy(ar);
2691 return 0;
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002692 case 1:
2693 /* fall-through */
2694 case MSI_NUM_REQUEST:
2695 pci_disable_msi(ar_pci->pdev);
2696 return 0;
Alexander Gordeevbb8b6212014-02-13 17:50:01 +02002697 default:
2698 pci_disable_msi(ar_pci->pdev);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002699 }
2700
Michal Kazior7aa7a722014-08-25 12:09:38 +02002701 ath10k_warn(ar, "unknown irq configuration upon deinit\n");
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002702 return -EINVAL;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002703}
2704
Michal Kaziord7fb47f2013-11-08 08:01:26 +01002705static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002706{
2707 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo0399eca2014-03-28 09:32:21 +02002708 unsigned long timeout;
Kalle Valo0399eca2014-03-28 09:32:21 +02002709 u32 val;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002710
Michal Kazior7aa7a722014-08-25 12:09:38 +02002711	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002712
Kalle Valo0399eca2014-03-28 09:32:21 +02002713 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2714
2715 do {
2716 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2717
Michal Kazior7aa7a722014-08-25 12:09:38 +02002718 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2719 val);
Kalle Valo50f87a62014-03-28 09:32:52 +02002720
Kalle Valo0399eca2014-03-28 09:32:21 +02002721 /* target should never return this */
2722 if (val == 0xffffffff)
2723 continue;
2724
Michal Kazior7710cd22014-04-23 19:30:04 +03002725 /* the device has crashed so don't bother trying anymore */
2726 if (val & FW_IND_EVENT_PENDING)
2727 break;
2728
Kalle Valo0399eca2014-03-28 09:32:21 +02002729 if (val & FW_IND_INITIALIZED)
2730 break;
2731
Kalle Valo5e3dd152013-06-12 20:52:10 +03002732 if (ar_pci->num_msi_intrs == 0)
2733 /* Fix potential race by repeating CORE_BASE writes */
Michal Kaziora4282492014-10-20 14:14:37 +02002734 ath10k_pci_enable_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002735
Kalle Valo0399eca2014-03-28 09:32:21 +02002736 mdelay(10);
2737 } while (time_before(jiffies, timeout));
2738
Michal Kaziora4282492014-10-20 14:14:37 +02002739 ath10k_pci_disable_and_clear_legacy_irq(ar);
Michal Kazior7c0f0e32014-10-20 14:14:38 +02002740 ath10k_pci_irq_msi_fw_mask(ar);
Michal Kaziora4282492014-10-20 14:14:37 +02002741
Michal Kazior6a4f6e12014-04-23 19:30:03 +03002742 if (val == 0xffffffff) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002743 ath10k_err(ar, "failed to read device register, device is gone\n");
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002744 return -EIO;
Michal Kazior6a4f6e12014-04-23 19:30:03 +03002745 }
2746
Michal Kazior7710cd22014-04-23 19:30:04 +03002747 if (val & FW_IND_EVENT_PENDING) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002748 ath10k_warn(ar, "device has crashed during init\n");
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002749 return -ECOMM;
Michal Kazior7710cd22014-04-23 19:30:04 +03002750 }
2751
Michal Kazior6a4f6e12014-04-23 19:30:03 +03002752 if (!(val & FW_IND_INITIALIZED)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002753 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
Kalle Valo0399eca2014-03-28 09:32:21 +02002754 val);
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002755 return -ETIMEDOUT;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002756 }
2757
Michal Kazior7aa7a722014-08-25 12:09:38 +02002758 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002759 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002760}
2761
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002762static int ath10k_pci_cold_reset(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002763{
Kalle Valo5e3dd152013-06-12 20:52:10 +03002764 u32 val;
2765
Michal Kazior7aa7a722014-08-25 12:09:38 +02002766 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002767
Ben Greearf51dbe72014-09-29 14:41:46 +03002768 spin_lock_bh(&ar->data_lock);
2769
2770 ar->stats.fw_cold_reset_counter++;
2771
2772 spin_unlock_bh(&ar->data_lock);
2773
Kalle Valo5e3dd152013-06-12 20:52:10 +03002774 /* Put Target, including PCIe, into RESET. */
Kalle Valoe479ed42013-09-01 10:01:53 +03002775 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002776 val |= 1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002777 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002778
Vasanthakumar Thiagarajanacd19582015-07-10 14:31:20 +05302779	/* After writing SOC_GLOBAL_RESET to put the device into reset, and
2780	 * again when pulling it out of reset, PCIe may not be stable: an
2781	 * immediate PCIe register access can cause a bus error. Add a delay
2782	 * before any PCIe access to avoid this.
2783 */
2784 msleep(20);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002785
2786 /* Pull Target, including PCIe, out of RESET. */
2787 val &= ~1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002788 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002789
Vasanthakumar Thiagarajanacd19582015-07-10 14:31:20 +05302790 msleep(20);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002791
Michal Kazior7aa7a722014-08-25 12:09:38 +02002792 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
Kalle Valo50f87a62014-03-28 09:32:52 +02002793
Michal Kazior5b2589f2013-11-08 08:01:30 +01002794 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002795}
2796
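/* Standard PCI bring-up: enable the device, reserve the register BAR,
 * restrict both streaming and coherent DMA to 32 bits (the target does
 * not do 64-bit DMA), enable bus mastering and map the BAR. Undone in
 * reverse order by ath10k_pci_release().
 */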
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}

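/* Tear down everything ath10k_pci_claim() set up, in reverse order. */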
static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

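/* The silicon revision is carried in the SOC chip id register; compare
 * the (PCI device id, revision) pair against the supported-chip table.
 */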
static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
{
	const struct ath10k_pci_supp_chip *supp_chip;
	int i;
	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);

	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
		supp_chip = &ath10k_pci_supp_chips[i];

		if (supp_chip->dev_id == dev_id &&
		    supp_chip->rev_id == rev_id)
			return true;
	}

	return false;
}

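/* Probe sequence: map the PCI device id to a hw revision, create the
 * ath10k core, claim PCI resources, allocate copy engine pipes, set up
 * and request interrupts, reset the chip, then validate the chip id
 * before registering with the core. Each error path unwinds only what
 * has been set up so far.
 */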
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		break;
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		break;
	case QCA99X0_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA99X0;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;

	if (pdev->subsystem_vendor || pdev->subsystem_device)
		scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
			  "%04x:%04x:%04x:%04x",
			  pdev->vendor, pdev->device,
			  pdev->subsystem_vendor, pdev->subsystem_device);

	spin_lock_init(&ar_pci->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);

	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);
	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
		    (unsigned long)ar);

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_free_pipes;
	}

	ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
			   pdev->device, chip_id);
		ret = -ENODEV;
		goto err_free_irq;
	}

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_sleep:
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

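/* Device removal: unregister from the core first so mac80211 stops
 * using the device, then release IRQs, pipes and PCI resources in the
 * reverse of probe order.
 */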
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

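/* MODULE_DEVICE_TABLE exports the PCI id table as a module alias so
 * udev/modprobe can autoload the driver when a matching device appears.
 */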
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");

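/* MODULE_FIRMWARE only records the expected firmware file names in the
 * module metadata (for initramfs and packaging tools); the files are
 * actually loaded at runtime via request_firmware().
 */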
/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);

/* QCA6174 3.1 firmware files (loaded from the 3.0 firmware directory) */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
Bartosz Markowski5c427f52015-02-18 13:16:37 +01003091MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);