Kalle Valo5e3dd152013-06-12 20:52:10 +03001/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/pci.h>
19#include <linux/module.h>
20#include <linux/interrupt.h>
21#include <linux/spinlock.h>
Kalle Valo650b91f2013-11-20 10:00:49 +020022#include <linux/bitops.h>
Kalle Valo5e3dd152013-06-12 20:52:10 +030023
24#include "core.h"
25#include "debug.h"
26
27#include "targaddrs.h"
28#include "bmi.h"
29
30#include "hif.h"
31#include "htc.h"
32
33#include "ce.h"
34#include "pci.h"
35
Bartosz Markowski8cc8df92013-08-02 09:58:49 +020036static unsigned int ath10k_target_ps;
Kalle Valo5e3dd152013-06-12 20:52:10 +030037module_param(ath10k_target_ps, uint, 0644);
38MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
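/*
 * Note: with 0644 permissions this parameter can be set at module load
 * time (e.g. "modprobe ath10k_pci ath10k_target_ps=1") and is also
 * visible at runtime under /sys/module/<module>/parameters/.  The module
 * name and path here are illustrative only, not taken from this file.
 */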
39
Kalle Valo5e3dd152013-06-12 20:52:10 +030040#define QCA988X_2_0_DEVICE_ID (0x003c)
41
42static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
Kalle Valo5e3dd152013-06-12 20:52:10 +030043 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
44 {0}
45};
46
47static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
48 u32 *data);
49
50static void ath10k_pci_process_ce(struct ath10k *ar);
51static int ath10k_pci_post_rx(struct ath10k *ar);
Michal Kazior87263e52013-08-27 13:08:01 +020052static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
Kalle Valo5e3dd152013-06-12 20:52:10 +030053 int num);
Michal Kazior87263e52013-08-27 13:08:01 +020054static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
Kalle Valo5e3dd152013-06-12 20:52:10 +030055static void ath10k_pci_stop_ce(struct ath10k *ar);
Michal Kazior5b2589f2013-11-08 08:01:30 +010056static int ath10k_pci_device_reset(struct ath10k *ar);
Michal Kaziord7fb47f2013-11-08 08:01:26 +010057static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +010058static int ath10k_pci_init_irq(struct ath10k *ar);
59static int ath10k_pci_deinit_irq(struct ath10k *ar);
60static int ath10k_pci_request_irq(struct ath10k *ar);
61static void ath10k_pci_free_irq(struct ath10k *ar);
Michal Kazior85622cd2013-11-25 14:06:22 +010062static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
63 struct ath10k_ce_pipe *rx_pipe,
64 struct bmi_xfer *xfer);
Michal Kaziorc80de122013-11-25 14:06:23 +010065static void ath10k_pci_cleanup_ce(struct ath10k *ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +030066
67static const struct ce_attr host_ce_config_wlan[] = {
Kalle Valo48e9c222013-09-01 10:01:32 +030068 /* CE0: host->target HTC control and raw streams */
69 {
70 .flags = CE_ATTR_FLAGS,
71 .src_nentries = 16,
72 .src_sz_max = 256,
73 .dest_nentries = 0,
74 },
75
76 /* CE1: target->host HTT + HTC control */
77 {
78 .flags = CE_ATTR_FLAGS,
79 .src_nentries = 0,
80 .src_sz_max = 512,
81 .dest_nentries = 512,
82 },
83
84 /* CE2: target->host WMI */
85 {
86 .flags = CE_ATTR_FLAGS,
87 .src_nentries = 0,
88 .src_sz_max = 2048,
89 .dest_nentries = 32,
90 },
91
92 /* CE3: host->target WMI */
93 {
94 .flags = CE_ATTR_FLAGS,
95 .src_nentries = 32,
96 .src_sz_max = 2048,
97 .dest_nentries = 0,
98 },
99
100 /* CE4: host->target HTT */
101 {
102 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
103 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
104 .src_sz_max = 256,
105 .dest_nentries = 0,
106 },
107
108 /* CE5: unused */
109 {
110 .flags = CE_ATTR_FLAGS,
111 .src_nentries = 0,
112 .src_sz_max = 0,
113 .dest_nentries = 0,
114 },
115
116 /* CE6: target autonomous hif_memcpy */
117 {
118 .flags = CE_ATTR_FLAGS,
119 .src_nentries = 0,
120 .src_sz_max = 0,
121 .dest_nentries = 0,
122 },
123
124 /* CE7: ce_diag, the Diagnostic Window */
125 {
126 .flags = CE_ATTR_FLAGS,
127 .src_nentries = 2,
128 .src_sz_max = DIAG_TRANSFER_LIMIT,
129 .dest_nentries = 2,
130 },
Kalle Valo5e3dd152013-06-12 20:52:10 +0300131};
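/*
 * The attributes above size the host-side CE rings for CE0-CE7.  They are
 * intended to stay in sync with target_ce_config_wlan[] below, which
 * describes the same pipes from the firmware's point of view and is
 * downloaded to the target in ath10k_pci_init_config().
 */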
132
133/* Target firmware's Copy Engine configuration. */
134static const struct ce_pipe_config target_ce_config_wlan[] = {
Kalle Valod88effb2013-09-01 10:01:39 +0300135 /* CE0: host->target HTC control and raw streams */
136 {
137 .pipenum = 0,
138 .pipedir = PIPEDIR_OUT,
139 .nentries = 32,
140 .nbytes_max = 256,
141 .flags = CE_ATTR_FLAGS,
142 .reserved = 0,
143 },
144
145 /* CE1: target->host HTT + HTC control */
146 {
147 .pipenum = 1,
148 .pipedir = PIPEDIR_IN,
149 .nentries = 32,
150 .nbytes_max = 512,
151 .flags = CE_ATTR_FLAGS,
152 .reserved = 0,
153 },
154
155 /* CE2: target->host WMI */
156 {
157 .pipenum = 2,
158 .pipedir = PIPEDIR_IN,
159 .nentries = 32,
160 .nbytes_max = 2048,
161 .flags = CE_ATTR_FLAGS,
162 .reserved = 0,
163 },
164
165 /* CE3: host->target WMI */
166 {
167 .pipenum = 3,
168 .pipedir = PIPEDIR_OUT,
169 .nentries = 32,
170 .nbytes_max = 2048,
171 .flags = CE_ATTR_FLAGS,
172 .reserved = 0,
173 },
174
175 /* CE4: host->target HTT */
176 {
177 .pipenum = 4,
178 .pipedir = PIPEDIR_OUT,
179 .nentries = 256,
180 .nbytes_max = 256,
181 .flags = CE_ATTR_FLAGS,
182 .reserved = 0,
183 },
184
Kalle Valo5e3dd152013-06-12 20:52:10 +0300185 /* NB: 50% of src nentries, since tx has 2 frags */
Kalle Valod88effb2013-09-01 10:01:39 +0300186
187 /* CE5: unused */
188 {
189 .pipenum = 5,
190 .pipedir = PIPEDIR_OUT,
191 .nentries = 32,
192 .nbytes_max = 2048,
193 .flags = CE_ATTR_FLAGS,
194 .reserved = 0,
195 },
196
197 /* CE6: Reserved for target autonomous hif_memcpy */
198 {
199 .pipenum = 6,
200 .pipedir = PIPEDIR_INOUT,
201 .nentries = 32,
202 .nbytes_max = 4096,
203 .flags = CE_ATTR_FLAGS,
204 .reserved = 0,
205 },
206
Kalle Valo5e3dd152013-06-12 20:52:10 +0300207 /* CE7 used only by Host */
208};
209
Michal Kaziore5398872013-11-25 14:06:20 +0100210static bool ath10k_pci_irq_pending(struct ath10k *ar)
211{
212 u32 cause;
213
214 /* Check if the shared legacy irq is for us */
215 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
216 PCIE_INTR_CAUSE_ADDRESS);
217 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
218 return true;
219
220 return false;
221}
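/*
 * Used on the shared legacy interrupt path: returning false means neither
 * the firmware nor any copy engine has raised a cause bit, so the
 * interrupt can be assumed to belong to another device on the line.
 */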
222
Kalle Valo5e3dd152013-06-12 20:52:10 +0300223/*
224 * Diagnostic read/write access is provided for startup/config/debug usage.
 225 * Caller must guarantee proper alignment, when applicable, and that there
 226 * is only a single user at any given moment.
227 */
228static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
229 int nbytes)
230{
231 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
232 int ret = 0;
233 u32 buf;
234 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
235 unsigned int id;
236 unsigned int flags;
Michal Kazior2aa39112013-08-27 13:08:02 +0200237 struct ath10k_ce_pipe *ce_diag;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300238 /* Host buffer address in CE space */
239 u32 ce_data;
240 dma_addr_t ce_data_base = 0;
241 void *data_buf = NULL;
242 int i;
243
244 /*
245 * This code cannot handle reads to non-memory space. Redirect to the
 246 * register read function, but preserve the multi-word read capability
 247 * of this function.
248 */
249 if (address < DRAM_BASE_ADDRESS) {
250 if (!IS_ALIGNED(address, 4) ||
251 !IS_ALIGNED((unsigned long)data, 4))
252 return -EIO;
253
254 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
255 ar, address, (u32 *)data)) == 0)) {
256 nbytes -= sizeof(u32);
257 address += sizeof(u32);
258 data += sizeof(u32);
259 }
260 return ret;
261 }
262
263 ce_diag = ar_pci->ce_diag;
264
265 /*
266 * Allocate a temporary bounce buffer to hold caller's data
267 * to be DMA'ed from Target. This guarantees
268 * 1) 4-byte alignment
269 * 2) Buffer in DMA-able space
270 */
271 orig_nbytes = nbytes;
272 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
273 orig_nbytes,
274 &ce_data_base);
275
276 if (!data_buf) {
277 ret = -ENOMEM;
278 goto done;
279 }
280 memset(data_buf, 0, orig_nbytes);
281
282 remaining_bytes = orig_nbytes;
283 ce_data = ce_data_base;
284 while (remaining_bytes) {
285 nbytes = min_t(unsigned int, remaining_bytes,
286 DIAG_TRANSFER_LIMIT);
287
288 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
289 if (ret != 0)
290 goto done;
291
292 /* Request CE to send from Target(!) address to Host buffer */
293 /*
294 * The address supplied by the caller is in the
295 * Target CPU virtual address space.
296 *
297 * In order to use this address with the diagnostic CE,
298 * convert it from Target CPU virtual address space
299 * to CE address space
300 */
301 ath10k_pci_wake(ar);
302 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
303 address);
304 ath10k_pci_sleep(ar);
305
306 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
307 0);
308 if (ret)
309 goto done;
310
311 i = 0;
312 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
313 &completed_nbytes,
314 &id) != 0) {
315 mdelay(1);
316 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
317 ret = -EBUSY;
318 goto done;
319 }
320 }
321
322 if (nbytes != completed_nbytes) {
323 ret = -EIO;
324 goto done;
325 }
326
327 if (buf != (u32) address) {
328 ret = -EIO;
329 goto done;
330 }
331
332 i = 0;
333 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
334 &completed_nbytes,
335 &id, &flags) != 0) {
336 mdelay(1);
337
338 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
339 ret = -EBUSY;
340 goto done;
341 }
342 }
343
344 if (nbytes != completed_nbytes) {
345 ret = -EIO;
346 goto done;
347 }
348
349 if (buf != ce_data) {
350 ret = -EIO;
351 goto done;
352 }
353
354 remaining_bytes -= nbytes;
355 address += nbytes;
356 ce_data += nbytes;
357 }
358
359done:
360 if (ret == 0) {
361 /* Copy data from allocated DMA buf to caller's buf */
362 WARN_ON_ONCE(orig_nbytes & 3);
363 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
364 ((u32 *)data)[i] =
365 __le32_to_cpu(((__le32 *)data_buf)[i]);
366 }
367 } else
368 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
369 __func__, address);
370
371 if (data_buf)
372 pci_free_consistent(ar_pci->pdev, orig_nbytes,
373 data_buf, ce_data_base);
374
375 return ret;
376}
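/*
 * Illustrative use (sketch only): reading a single word of target DRAM,
 * much like ath10k_pci_diag_read_access() below does:
 *
 *	u32 val;
 *	int ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
 *
 * "address" is a target CPU address; alignment and exclusive use of the
 * diagnostic CE remain the caller's responsibility.
 */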
377
378/* Read 4-byte aligned data from Target memory or register */
379static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
380 u32 *data)
381{
382 /* Assume range doesn't cross this boundary */
383 if (address >= DRAM_BASE_ADDRESS)
384 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
385
386 ath10k_pci_wake(ar);
387 *data = ath10k_pci_read32(ar, address);
388 ath10k_pci_sleep(ar);
389 return 0;
390}
391
392static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
393 const void *data, int nbytes)
394{
395 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
396 int ret = 0;
397 u32 buf;
398 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
399 unsigned int id;
400 unsigned int flags;
Michal Kazior2aa39112013-08-27 13:08:02 +0200401 struct ath10k_ce_pipe *ce_diag;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300402 void *data_buf = NULL;
403 u32 ce_data; /* Host buffer address in CE space */
404 dma_addr_t ce_data_base = 0;
405 int i;
406
407 ce_diag = ar_pci->ce_diag;
408
409 /*
410 * Allocate a temporary bounce buffer to hold caller's data
411 * to be DMA'ed to Target. This guarantees
412 * 1) 4-byte alignment
413 * 2) Buffer in DMA-able space
414 */
415 orig_nbytes = nbytes;
416 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
417 orig_nbytes,
418 &ce_data_base);
419 if (!data_buf) {
420 ret = -ENOMEM;
421 goto done;
422 }
423
424 /* Copy caller's data to allocated DMA buf */
425 WARN_ON_ONCE(orig_nbytes & 3);
426 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
427 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
428
429 /*
430 * The address supplied by the caller is in the
431 * Target CPU virtual address space.
432 *
433 * In order to use this address with the diagnostic CE,
434 * convert it from
435 * Target CPU virtual address space
436 * to
437 * CE address space
438 */
439 ath10k_pci_wake(ar);
440 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
441 ath10k_pci_sleep(ar);
442
443 remaining_bytes = orig_nbytes;
444 ce_data = ce_data_base;
445 while (remaining_bytes) {
446 /* FIXME: check cast */
447 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
448
449 /* Set up to receive directly into Target(!) address */
450 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
451 if (ret != 0)
452 goto done;
453
454 /*
455 * Request CE to send caller-supplied data that
456 * was copied to bounce buffer to Target(!) address.
457 */
458 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
459 nbytes, 0, 0);
460 if (ret != 0)
461 goto done;
462
463 i = 0;
464 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
465 &completed_nbytes,
466 &id) != 0) {
467 mdelay(1);
468
469 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
470 ret = -EBUSY;
471 goto done;
472 }
473 }
474
475 if (nbytes != completed_nbytes) {
476 ret = -EIO;
477 goto done;
478 }
479
480 if (buf != ce_data) {
481 ret = -EIO;
482 goto done;
483 }
484
485 i = 0;
486 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
487 &completed_nbytes,
488 &id, &flags) != 0) {
489 mdelay(1);
490
491 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
492 ret = -EBUSY;
493 goto done;
494 }
495 }
496
497 if (nbytes != completed_nbytes) {
498 ret = -EIO;
499 goto done;
500 }
501
502 if (buf != address) {
503 ret = -EIO;
504 goto done;
505 }
506
507 remaining_bytes -= nbytes;
508 address += nbytes;
509 ce_data += nbytes;
510 }
511
512done:
513 if (data_buf) {
514 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
515 ce_data_base);
516 }
517
518 if (ret != 0)
519 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
520 address);
521
522 return ret;
523}
524
525/* Write 4B data to Target memory or register */
526static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
527 u32 data)
528{
529 /* Assume range doesn't cross this boundary */
530 if (address >= DRAM_BASE_ADDRESS)
531 return ath10k_pci_diag_write_mem(ar, address, &data,
532 sizeof(u32));
533
534 ath10k_pci_wake(ar);
535 ath10k_pci_write32(ar, address, data);
536 ath10k_pci_sleep(ar);
537 return 0;
538}
539
540static bool ath10k_pci_target_is_awake(struct ath10k *ar)
541{
542 void __iomem *mem = ath10k_pci_priv(ar)->mem;
543 u32 val;
544 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
545 RTC_STATE_ADDRESS);
546 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
547}
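/*
 * The target is considered awake once the RTC state machine reports
 * RTC_STATE_V_ON; ath10k_do_pci_wake() below polls this after forcing
 * PCIE_SOC_WAKE_V_MASK.
 */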
548
Kalle Valo3aebe542013-09-01 10:02:07 +0300549int ath10k_do_pci_wake(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300550{
551 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
552 void __iomem *pci_addr = ar_pci->mem;
553 int tot_delay = 0;
554 int curr_delay = 5;
555
556 if (atomic_read(&ar_pci->keep_awake_count) == 0) {
557 /* Force AWAKE */
558 iowrite32(PCIE_SOC_WAKE_V_MASK,
559 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
560 PCIE_SOC_WAKE_ADDRESS);
561 }
562 atomic_inc(&ar_pci->keep_awake_count);
563
564 if (ar_pci->verified_awake)
Kalle Valo3aebe542013-09-01 10:02:07 +0300565 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300566
567 for (;;) {
568 if (ath10k_pci_target_is_awake(ar)) {
569 ar_pci->verified_awake = true;
Kalle Valo3aebe542013-09-01 10:02:07 +0300570 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300571 }
572
573 if (tot_delay > PCIE_WAKE_TIMEOUT) {
Kalle Valo3aebe542013-09-01 10:02:07 +0300574 ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
575 PCIE_WAKE_TIMEOUT,
Kalle Valo5e3dd152013-06-12 20:52:10 +0300576 atomic_read(&ar_pci->keep_awake_count));
Kalle Valo3aebe542013-09-01 10:02:07 +0300577 return -ETIMEDOUT;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300578 }
579
580 udelay(curr_delay);
581 tot_delay += curr_delay;
582
583 if (curr_delay < 50)
584 curr_delay += 5;
585 }
586}
587
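/*
 * Counterpart of ath10k_do_pci_wake(): each call to the wake helper is
 * expected to be balanced by a call here, so that keep_awake_count drops
 * back to zero and the SOC is allowed to sleep again.
 */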
588void ath10k_do_pci_sleep(struct ath10k *ar)
589{
590 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
591 void __iomem *pci_addr = ar_pci->mem;
592
593 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
594 /* Allow sleep */
595 ar_pci->verified_awake = false;
596 iowrite32(PCIE_SOC_WAKE_RESET,
597 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
598 PCIE_SOC_WAKE_ADDRESS);
599 }
600}
601
602/*
603 * FIXME: Handle OOM properly.
604 */
605static inline
Michal Kazior87263e52013-08-27 13:08:01 +0200606struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300607{
608 struct ath10k_pci_compl *compl = NULL;
609
610 spin_lock_bh(&pipe_info->pipe_lock);
611 if (list_empty(&pipe_info->compl_free)) {
612 ath10k_warn("Completion buffers are full\n");
613 goto exit;
614 }
615 compl = list_first_entry(&pipe_info->compl_free,
616 struct ath10k_pci_compl, list);
617 list_del(&compl->list);
618exit:
619 spin_unlock_bh(&pipe_info->pipe_lock);
620 return compl;
621}
622
623/* Called by lower (CE) layer when a send to Target completes. */
Michal Kazior5440ce22013-09-03 15:09:58 +0200624static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300625{
626 struct ath10k *ar = ce_state->ar;
627 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior87263e52013-08-27 13:08:01 +0200628 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
Kalle Valo5e3dd152013-06-12 20:52:10 +0300629 struct ath10k_pci_compl *compl;
Michal Kazior5440ce22013-09-03 15:09:58 +0200630 void *transfer_context;
631 u32 ce_data;
632 unsigned int nbytes;
633 unsigned int transfer_id;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300634
Michal Kazior5440ce22013-09-03 15:09:58 +0200635 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
636 &ce_data, &nbytes,
637 &transfer_id) == 0) {
Kalle Valo5e3dd152013-06-12 20:52:10 +0300638 compl = get_free_compl(pipe_info);
639 if (!compl)
640 break;
641
Michal Kaziorf9d8fec2013-08-13 07:54:56 +0200642 compl->state = ATH10K_PCI_COMPL_SEND;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300643 compl->ce_state = ce_state;
644 compl->pipe_info = pipe_info;
Kalle Valoaa5c1db2013-09-01 10:01:46 +0300645 compl->skb = transfer_context;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300646 compl->nbytes = nbytes;
647 compl->transfer_id = transfer_id;
648 compl->flags = 0;
649
650 /*
651 * Add the completion to the processing queue.
652 */
653 spin_lock_bh(&ar_pci->compl_lock);
654 list_add_tail(&compl->list, &ar_pci->compl_process);
655 spin_unlock_bh(&ar_pci->compl_lock);
Michal Kazior5440ce22013-09-03 15:09:58 +0200656 }
Kalle Valo5e3dd152013-06-12 20:52:10 +0300657
658 ath10k_pci_process_ce(ar);
659}
660
661/* Called by lower (CE) layer when data is received from the Target. */
Michal Kazior5440ce22013-09-03 15:09:58 +0200662static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300663{
664 struct ath10k *ar = ce_state->ar;
665 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior87263e52013-08-27 13:08:01 +0200666 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
Kalle Valo5e3dd152013-06-12 20:52:10 +0300667 struct ath10k_pci_compl *compl;
668 struct sk_buff *skb;
Michal Kazior5440ce22013-09-03 15:09:58 +0200669 void *transfer_context;
670 u32 ce_data;
671 unsigned int nbytes;
672 unsigned int transfer_id;
673 unsigned int flags;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300674
Michal Kazior5440ce22013-09-03 15:09:58 +0200675 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
676 &ce_data, &nbytes, &transfer_id,
677 &flags) == 0) {
Kalle Valo5e3dd152013-06-12 20:52:10 +0300678 compl = get_free_compl(pipe_info);
679 if (!compl)
680 break;
681
Michal Kaziorf9d8fec2013-08-13 07:54:56 +0200682 compl->state = ATH10K_PCI_COMPL_RECV;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300683 compl->ce_state = ce_state;
684 compl->pipe_info = pipe_info;
Kalle Valoaa5c1db2013-09-01 10:01:46 +0300685 compl->skb = transfer_context;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300686 compl->nbytes = nbytes;
687 compl->transfer_id = transfer_id;
688 compl->flags = flags;
689
690 skb = transfer_context;
691 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
692 skb->len + skb_tailroom(skb),
693 DMA_FROM_DEVICE);
694 /*
695 * Add the completion to the processing queue.
696 */
697 spin_lock_bh(&ar_pci->compl_lock);
698 list_add_tail(&compl->list, &ar_pci->compl_process);
699 spin_unlock_bh(&ar_pci->compl_lock);
Michal Kazior5440ce22013-09-03 15:09:58 +0200700 }
Kalle Valo5e3dd152013-06-12 20:52:10 +0300701
702 ath10k_pci_process_ce(ar);
703}
704
705/* Send the first nbytes bytes of the buffer */
706static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
707 unsigned int transfer_id,
708 unsigned int bytes, struct sk_buff *nbuf)
709{
710 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
711 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior87263e52013-08-27 13:08:01 +0200712 struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
Michal Kazior2aa39112013-08-27 13:08:02 +0200713 struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300714 unsigned int len;
715 u32 flags = 0;
716 int ret;
717
Kalle Valo5e3dd152013-06-12 20:52:10 +0300718 len = min(bytes, nbuf->len);
719 bytes -= len;
720
721 if (len & 3)
722 ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
723
724 ath10k_dbg(ATH10K_DBG_PCI,
725 "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
726 nbuf->data, (unsigned long long) skb_cb->paddr,
727 nbuf->len, len);
728 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
729 "ath10k tx: data: ",
730 nbuf->data, nbuf->len);
731
Michal Kazior2e761b52013-10-02 11:03:40 +0200732 ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
733 flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300734 if (ret)
Michal Kazior1d2b48d2013-11-08 08:01:34 +0100735 ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300736
737 return ret;
738}
739
740static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
741{
742 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior3efcb3b2013-10-02 11:03:41 +0200743 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300744}
745
746static void ath10k_pci_hif_dump_area(struct ath10k *ar)
747{
748 u32 reg_dump_area = 0;
749 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
750 u32 host_addr;
751 int ret;
752 u32 i;
753
754 ath10k_err("firmware crashed!\n");
755 ath10k_err("hardware name %s version 0x%x\n",
756 ar->hw_params.name, ar->target_version);
757 ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
758 ar->fw_version_minor, ar->fw_version_release,
759 ar->fw_version_build);
760
761 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
Michal Kazior1d2b48d2013-11-08 08:01:34 +0100762 ret = ath10k_pci_diag_read_mem(ar, host_addr,
763 &reg_dump_area, sizeof(u32));
764 if (ret) {
765 ath10k_err("failed to read FW dump area address: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300766 return;
767 }
768
769 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
770
771 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
772 &reg_dump_values[0],
773 REG_DUMP_COUNT_QCA988X * sizeof(u32));
774 if (ret != 0) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +0100775 ath10k_err("failed to read FW dump area: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300776 return;
777 }
778
779 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
780
781 ath10k_err("target Register Dump\n");
782 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
783 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
784 i,
785 reg_dump_values[i],
786 reg_dump_values[i + 1],
787 reg_dump_values[i + 2],
788 reg_dump_values[i + 3]);
Michal Kazioraffd3212013-07-16 09:54:35 +0200789
Michal Kazior5e90de82013-10-16 16:46:05 +0300790 queue_work(ar->workqueue, &ar->restart_work);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300791}
792
793static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
794 int force)
795{
796 if (!force) {
797 int resources;
798 /*
799 * Decide whether to actually poll for completions, or just
800 * wait for a later chance.
801 * If there seem to be plenty of resources left, then just wait
802 * since checking involves reading a CE register, which is a
803 * relatively expensive operation.
804 */
805 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
806
807 /*
808 * If at least 50% of the total resources are still available,
809 * don't bother checking again yet.
810 */
811 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
812 return;
813 }
814 ath10k_ce_per_engine_service(ar, pipe);
815}
816
Michal Kaziore799bbf2013-07-05 16:15:12 +0300817static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
818 struct ath10k_hif_cb *callbacks)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300819{
820 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
821
822 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
823
824 memcpy(&ar_pci->msg_callbacks_current, callbacks,
825 sizeof(ar_pci->msg_callbacks_current));
826}
827
Michal Kaziorc80de122013-11-25 14:06:23 +0100828static int ath10k_pci_alloc_compl(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300829{
830 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300831 const struct ce_attr *attr;
Michal Kazior87263e52013-08-27 13:08:01 +0200832 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300833 struct ath10k_pci_compl *compl;
Michal Kaziorc80de122013-11-25 14:06:23 +0100834 int i, pipe_num, completions;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300835
836 spin_lock_init(&ar_pci->compl_lock);
837 INIT_LIST_HEAD(&ar_pci->compl_process);
838
Michal Kaziorfad6ed72013-11-08 08:01:23 +0100839 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Kalle Valo5e3dd152013-06-12 20:52:10 +0300840 pipe_info = &ar_pci->pipe_info[pipe_num];
841
842 spin_lock_init(&pipe_info->pipe_lock);
843 INIT_LIST_HEAD(&pipe_info->compl_free);
844
845 /* Handle Diagnostic CE specially */
Michal Kaziorc80de122013-11-25 14:06:23 +0100846 if (pipe_info->ce_hdl == ar_pci->ce_diag)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300847 continue;
848
849 attr = &host_ce_config_wlan[pipe_num];
850 completions = 0;
851
Michal Kaziorc80de122013-11-25 14:06:23 +0100852 if (attr->src_nentries)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300853 completions += attr->src_nentries;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300854
Michal Kaziorc80de122013-11-25 14:06:23 +0100855 if (attr->dest_nentries)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300856 completions += attr->dest_nentries;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300857
858 for (i = 0; i < completions; i++) {
Michal Kaziorffe5daa2013-08-13 07:54:55 +0200859 compl = kmalloc(sizeof(*compl), GFP_KERNEL);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300860 if (!compl) {
861 ath10k_warn("No memory for completion state\n");
Michal Kaziorc80de122013-11-25 14:06:23 +0100862 ath10k_pci_cleanup_ce(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300863 return -ENOMEM;
864 }
865
Michal Kaziorf9d8fec2013-08-13 07:54:56 +0200866 compl->state = ATH10K_PCI_COMPL_FREE;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300867 list_add_tail(&compl->list, &pipe_info->compl_free);
868 }
869 }
870
871 return 0;
872}
873
Michal Kaziorc80de122013-11-25 14:06:23 +0100874static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
875{
876 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
877 const struct ce_attr *attr;
878 struct ath10k_pci_pipe *pipe_info;
879 int pipe_num, disable_interrupts;
880
881 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
882 pipe_info = &ar_pci->pipe_info[pipe_num];
883
884 /* Handle Diagnostic CE specially */
885 if (pipe_info->ce_hdl == ar_pci->ce_diag)
886 continue;
887
888 attr = &host_ce_config_wlan[pipe_num];
889
890 if (attr->src_nentries) {
891 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
892 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
893 ath10k_pci_ce_send_done,
894 disable_interrupts);
895 }
896
897 if (attr->dest_nentries)
898 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
899 ath10k_pci_ce_recv_data);
900 }
901
902 return 0;
903}
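/*
 * Note that CE4 (host->target HTT) is created with CE_ATTR_DIS_INTR, so
 * its send completions are not interrupt driven; they are reaped by
 * polling through ath10k_pci_hif_send_complete_check() instead.
 */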
904
Michal Kazior96a9d0d2013-11-08 08:01:25 +0100905static void ath10k_pci_kill_tasklet(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300906{
907 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300908 int i;
909
Kalle Valo5e3dd152013-06-12 20:52:10 +0300910 tasklet_kill(&ar_pci->intr_tq);
Michal Kazior103d4f52013-11-08 08:01:24 +0100911 tasklet_kill(&ar_pci->msi_fw_err);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300912
913 for (i = 0; i < CE_COUNT; i++)
914 tasklet_kill(&ar_pci->pipe_info[i].intr);
Michal Kazior96a9d0d2013-11-08 08:01:25 +0100915}
916
917static void ath10k_pci_stop_ce(struct ath10k *ar)
918{
919 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
920 struct ath10k_pci_compl *compl;
921 struct sk_buff *skb;
Michal Kazior28642f42013-11-08 08:01:31 +0100922 int ret;
Michal Kazior96a9d0d2013-11-08 08:01:25 +0100923
Michal Kazior28642f42013-11-08 08:01:31 +0100924 ret = ath10k_ce_disable_interrupts(ar);
925 if (ret)
926 ath10k_warn("failed to disable CE interrupts: %d\n", ret);
927
Michal Kazior96a9d0d2013-11-08 08:01:25 +0100928 ath10k_pci_kill_tasklet(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300929
930 /* Mark pending completions as aborted, so that upper layers free up
931 * their associated resources */
932 spin_lock_bh(&ar_pci->compl_lock);
933 list_for_each_entry(compl, &ar_pci->compl_process, list) {
Kalle Valoaa5c1db2013-09-01 10:01:46 +0300934 skb = compl->skb;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300935 ATH10K_SKB_CB(skb)->is_aborted = true;
936 }
937 spin_unlock_bh(&ar_pci->compl_lock);
938}
939
940static void ath10k_pci_cleanup_ce(struct ath10k *ar)
941{
942 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
943 struct ath10k_pci_compl *compl, *tmp;
Michal Kazior87263e52013-08-27 13:08:01 +0200944 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300945 struct sk_buff *netbuf;
946 int pipe_num;
947
948 /* Free pending completions. */
949 spin_lock_bh(&ar_pci->compl_lock);
950 if (!list_empty(&ar_pci->compl_process))
951 ath10k_warn("pending completions still present! possible memory leaks.\n");
952
953 list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
954 list_del(&compl->list);
Kalle Valoaa5c1db2013-09-01 10:01:46 +0300955 netbuf = compl->skb;
Kalle Valo5e3dd152013-06-12 20:52:10 +0300956 dev_kfree_skb_any(netbuf);
957 kfree(compl);
958 }
959 spin_unlock_bh(&ar_pci->compl_lock);
960
961 /* Free unused completions for each pipe. */
Michal Kaziorfad6ed72013-11-08 08:01:23 +0100962 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Kalle Valo5e3dd152013-06-12 20:52:10 +0300963 pipe_info = &ar_pci->pipe_info[pipe_num];
964
965 spin_lock_bh(&pipe_info->pipe_lock);
966 list_for_each_entry_safe(compl, tmp,
967 &pipe_info->compl_free, list) {
968 list_del(&compl->list);
969 kfree(compl);
970 }
971 spin_unlock_bh(&pipe_info->pipe_lock);
972 }
973}
974
975static void ath10k_pci_process_ce(struct ath10k *ar)
976{
977 struct ath10k_pci *ar_pci = ar->hif.priv;
978 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
979 struct ath10k_pci_compl *compl;
980 struct sk_buff *skb;
981 unsigned int nbytes;
982 int ret, send_done = 0;
983
984 /* Upper layers aren't ready to handle tx/rx completions in parallel so
985 * we must serialize all completion processing. */
986
987 spin_lock_bh(&ar_pci->compl_lock);
988 if (ar_pci->compl_processing) {
989 spin_unlock_bh(&ar_pci->compl_lock);
990 return;
991 }
992 ar_pci->compl_processing = true;
993 spin_unlock_bh(&ar_pci->compl_lock);
994
995 for (;;) {
996 spin_lock_bh(&ar_pci->compl_lock);
997 if (list_empty(&ar_pci->compl_process)) {
998 spin_unlock_bh(&ar_pci->compl_lock);
999 break;
1000 }
1001 compl = list_first_entry(&ar_pci->compl_process,
1002 struct ath10k_pci_compl, list);
1003 list_del(&compl->list);
1004 spin_unlock_bh(&ar_pci->compl_lock);
1005
Michal Kaziorf9d8fec2013-08-13 07:54:56 +02001006 switch (compl->state) {
1007 case ATH10K_PCI_COMPL_SEND:
Kalle Valo5e3dd152013-06-12 20:52:10 +03001008 cb->tx_completion(ar,
Kalle Valoaa5c1db2013-09-01 10:01:46 +03001009 compl->skb,
Kalle Valo5e3dd152013-06-12 20:52:10 +03001010 compl->transfer_id);
1011 send_done = 1;
Michal Kaziorf9d8fec2013-08-13 07:54:56 +02001012 break;
1013 case ATH10K_PCI_COMPL_RECV:
Kalle Valo5e3dd152013-06-12 20:52:10 +03001014 ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
1015 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001016 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1017 compl->pipe_info->pipe_num, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001018 break;
1019 }
1020
Kalle Valoaa5c1db2013-09-01 10:01:46 +03001021 skb = compl->skb;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001022 nbytes = compl->nbytes;
1023
1024 ath10k_dbg(ATH10K_DBG_PCI,
1025 "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
1026 skb, nbytes);
1027 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
1028 "ath10k rx: ", skb->data, nbytes);
1029
1030 if (skb->len + skb_tailroom(skb) >= nbytes) {
1031 skb_trim(skb, 0);
1032 skb_put(skb, nbytes);
1033 cb->rx_completion(ar, skb,
1034 compl->pipe_info->pipe_num);
1035 } else {
1036 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
1037 nbytes,
1038 skb->len + skb_tailroom(skb));
1039 }
Michal Kaziorf9d8fec2013-08-13 07:54:56 +02001040 break;
1041 case ATH10K_PCI_COMPL_FREE:
1042 ath10k_warn("free completion cannot be processed\n");
1043 break;
1044 default:
1045 ath10k_warn("invalid completion state (%d)\n",
1046 compl->state);
1047 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001048 }
1049
Michal Kaziorf9d8fec2013-08-13 07:54:56 +02001050 compl->state = ATH10K_PCI_COMPL_FREE;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001051
1052 /*
1053 * Add completion back to the pipe's free list.
1054 */
1055 spin_lock_bh(&compl->pipe_info->pipe_lock);
1056 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001057 spin_unlock_bh(&compl->pipe_info->pipe_lock);
1058 }
1059
1060 spin_lock_bh(&ar_pci->compl_lock);
1061 ar_pci->compl_processing = false;
1062 spin_unlock_bh(&ar_pci->compl_lock);
1063}
1064
1065/* TODO - temporary mapping while we have too few CE's */
1066static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
1067 u16 service_id, u8 *ul_pipe,
1068 u8 *dl_pipe, int *ul_is_polled,
1069 int *dl_is_polled)
1070{
1071 int ret = 0;
1072
1073 /* polling for received messages not supported */
1074 *dl_is_polled = 0;
1075
1076 switch (service_id) {
1077 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
1078 /*
1079 * Host->target HTT gets its own pipe, so it can be polled
1080 * while other pipes are interrupt driven.
1081 */
1082 *ul_pipe = 4;
1083 /*
1084 * Use the same target->host pipe for HTC ctrl, HTC raw
1085 * streams, and HTT.
1086 */
1087 *dl_pipe = 1;
1088 break;
1089
1090 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
1091 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
1092 /*
1093 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
1094 * HTC_CTRL_RSVD_SVC could share the same pipe as the
1095 * WMI services. So, if another CE is needed, change
1096 * this to *ul_pipe = 3, which frees up CE 0.
1097 */
1098 /* *ul_pipe = 3; */
1099 *ul_pipe = 0;
1100 *dl_pipe = 1;
1101 break;
1102
1103 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
1104 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
1105 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1106 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1107
1108 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1109 *ul_pipe = 3;
1110 *dl_pipe = 2;
1111 break;
1112
1113 /* pipe 5 unused */
1114 /* pipe 6 reserved */
1115 /* pipe 7 reserved */
1116
1117 default:
1118 ret = -1;
1119 break;
1120 }
1121 *ul_is_polled =
1122 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1123
1124 return ret;
1125}
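/*
 * This host-side view of the routing mirrors target_service_to_ce_map_wlan[]
 * further down; the two must agree, since the latter is what gets
 * downloaded to the firmware in ath10k_pci_init_config().
 */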
1126
1127static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1128 u8 *ul_pipe, u8 *dl_pipe)
1129{
1130 int ul_is_polled, dl_is_polled;
1131
1132 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1133 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1134 ul_pipe,
1135 dl_pipe,
1136 &ul_is_polled,
1137 &dl_is_polled);
1138}
1139
Michal Kazior87263e52013-08-27 13:08:01 +02001140static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
Kalle Valo5e3dd152013-06-12 20:52:10 +03001141 int num)
1142{
1143 struct ath10k *ar = pipe_info->hif_ce_state;
1144 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior2aa39112013-08-27 13:08:02 +02001145 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001146 struct sk_buff *skb;
1147 dma_addr_t ce_data;
1148 int i, ret = 0;
1149
1150 if (pipe_info->buf_sz == 0)
1151 return 0;
1152
1153 for (i = 0; i < num; i++) {
1154 skb = dev_alloc_skb(pipe_info->buf_sz);
1155 if (!skb) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001156 ath10k_warn("failed to allocate skbuff for pipe %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03001157 pipe_info->pipe_num);
1158 ret = -ENOMEM;
1159 goto err;
1160 }
1161
1162 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1163
1164 ce_data = dma_map_single(ar->dev, skb->data,
1165 skb->len + skb_tailroom(skb),
1166 DMA_FROM_DEVICE);
1167
1168 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001169 ath10k_warn("failed to DMA map sk_buff\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001170 dev_kfree_skb_any(skb);
1171 ret = -EIO;
1172 goto err;
1173 }
1174
1175 ATH10K_SKB_CB(skb)->paddr = ce_data;
1176
1177 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1178 pipe_info->buf_sz,
1179 PCI_DMA_FROMDEVICE);
1180
1181 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1182 ce_data);
1183 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001184 ath10k_warn("failed to enqueue to pipe %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03001185 pipe_info->pipe_num, ret);
1186 goto err;
1187 }
1188 }
1189
1190 return ret;
1191
1192err:
1193 ath10k_pci_rx_pipe_cleanup(pipe_info);
1194 return ret;
1195}
1196
1197static int ath10k_pci_post_rx(struct ath10k *ar)
1198{
1199 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior87263e52013-08-27 13:08:01 +02001200 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001201 const struct ce_attr *attr;
1202 int pipe_num, ret = 0;
1203
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001204 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001205 pipe_info = &ar_pci->pipe_info[pipe_num];
1206 attr = &host_ce_config_wlan[pipe_num];
1207
1208 if (attr->dest_nentries == 0)
1209 continue;
1210
1211 ret = ath10k_pci_post_rx_pipe(pipe_info,
1212 attr->dest_nentries - 1);
1213 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001214 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1215 pipe_num, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001216
1217 for (; pipe_num >= 0; pipe_num--) {
1218 pipe_info = &ar_pci->pipe_info[pipe_num];
1219 ath10k_pci_rx_pipe_cleanup(pipe_info);
1220 }
1221 return ret;
1222 }
1223 }
1224
1225 return 0;
1226}
1227
1228static int ath10k_pci_hif_start(struct ath10k *ar)
1229{
1230 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1231 int ret;
1232
Michal Kaziorc80de122013-11-25 14:06:23 +01001233 ret = ath10k_pci_alloc_compl(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001234 if (ret) {
Michal Kaziorc80de122013-11-25 14:06:23 +01001235 ath10k_warn("failed to allocate CE completions: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001236 return ret;
1237 }
1238
Michal Kaziorc80de122013-11-25 14:06:23 +01001239 ret = ath10k_pci_setup_ce_irq(ar);
1240 if (ret) {
1241 ath10k_warn("failed to setup CE interrupts: %d\n", ret);
1242 goto err_free_compl;
1243 }
1244
Kalle Valo5e3dd152013-06-12 20:52:10 +03001245 /* Post buffers once to start things off. */
1246 ret = ath10k_pci_post_rx(ar);
1247 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001248 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1249 ret);
Michal Kaziorc80de122013-11-25 14:06:23 +01001250 goto err_stop_ce;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001251 }
1252
1253 ar_pci->started = 1;
1254 return 0;
Michal Kaziorc80de122013-11-25 14:06:23 +01001255
1256err_stop_ce:
1257 ath10k_pci_stop_ce(ar);
1258 ath10k_pci_process_ce(ar);
1259err_free_compl:
1260 ath10k_pci_cleanup_ce(ar);
1261 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001262}
1263
Michal Kazior87263e52013-08-27 13:08:01 +02001264static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001265{
1266 struct ath10k *ar;
1267 struct ath10k_pci *ar_pci;
Michal Kazior2aa39112013-08-27 13:08:02 +02001268 struct ath10k_ce_pipe *ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001269 u32 buf_sz;
1270 struct sk_buff *netbuf;
1271 u32 ce_data;
1272
1273 buf_sz = pipe_info->buf_sz;
1274
1275 /* Unused Copy Engine */
1276 if (buf_sz == 0)
1277 return;
1278
1279 ar = pipe_info->hif_ce_state;
1280 ar_pci = ath10k_pci_priv(ar);
1281
1282 if (!ar_pci->started)
1283 return;
1284
1285 ce_hdl = pipe_info->ce_hdl;
1286
1287 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1288 &ce_data) == 0) {
1289 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1290 netbuf->len + skb_tailroom(netbuf),
1291 DMA_FROM_DEVICE);
1292 dev_kfree_skb_any(netbuf);
1293 }
1294}
1295
Michal Kazior87263e52013-08-27 13:08:01 +02001296static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001297{
1298 struct ath10k *ar;
1299 struct ath10k_pci *ar_pci;
Michal Kazior2aa39112013-08-27 13:08:02 +02001300 struct ath10k_ce_pipe *ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001301 struct sk_buff *netbuf;
1302 u32 ce_data;
1303 unsigned int nbytes;
1304 unsigned int id;
1305 u32 buf_sz;
1306
1307 buf_sz = pipe_info->buf_sz;
1308
1309 /* Unused Copy Engine */
1310 if (buf_sz == 0)
1311 return;
1312
1313 ar = pipe_info->hif_ce_state;
1314 ar_pci = ath10k_pci_priv(ar);
1315
1316 if (!ar_pci->started)
1317 return;
1318
1319 ce_hdl = pipe_info->ce_hdl;
1320
1321 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1322 &ce_data, &nbytes, &id) == 0) {
Kalle Valoe9bb0aa2013-09-08 18:36:11 +03001323 /*
 1324 * Indicate the completion to the higher layer to free
1325 * the buffer
1326 */
Michal Kazior2415fc12013-11-08 08:01:32 +01001327
1328 if (!netbuf) {
1329 ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
1330 ce_hdl->id);
1331 continue;
1332 }
1333
Kalle Valoe9bb0aa2013-09-08 18:36:11 +03001334 ATH10K_SKB_CB(netbuf)->is_aborted = true;
1335 ar_pci->msg_callbacks_current.tx_completion(ar,
1336 netbuf,
1337 id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001338 }
1339}
1340
1341/*
1342 * Cleanup residual buffers for device shutdown:
1343 * buffers that were enqueued for receive
1344 * buffers that were to be sent
1345 * Note: Buffers that had completed but which were
1346 * not yet processed are on a completion queue. They
1347 * are handled when the completion thread shuts down.
1348 */
1349static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1350{
1351 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1352 int pipe_num;
1353
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001354 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Michal Kazior87263e52013-08-27 13:08:01 +02001355 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001356
1357 pipe_info = &ar_pci->pipe_info[pipe_num];
1358 ath10k_pci_rx_pipe_cleanup(pipe_info);
1359 ath10k_pci_tx_pipe_cleanup(pipe_info);
1360 }
1361}
1362
1363static void ath10k_pci_ce_deinit(struct ath10k *ar)
1364{
1365 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior87263e52013-08-27 13:08:01 +02001366 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001367 int pipe_num;
1368
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001369 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001370 pipe_info = &ar_pci->pipe_info[pipe_num];
1371 if (pipe_info->ce_hdl) {
1372 ath10k_ce_deinit(pipe_info->ce_hdl);
1373 pipe_info->ce_hdl = NULL;
1374 pipe_info->buf_sz = 0;
1375 }
1376 }
1377}
1378
Michal Kazior32270b62013-08-02 09:15:47 +02001379static void ath10k_pci_disable_irqs(struct ath10k *ar)
1380{
1381 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1382 int i;
1383
1384 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
1385 disable_irq(ar_pci->pdev->irq + i);
1386}
1387
Kalle Valo5e3dd152013-06-12 20:52:10 +03001388static void ath10k_pci_hif_stop(struct ath10k *ar)
1389{
Michal Kazior32270b62013-08-02 09:15:47 +02001390 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1391
Kalle Valo5e3dd152013-06-12 20:52:10 +03001392 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1393
Michal Kazior32270b62013-08-02 09:15:47 +02001394 /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001395 * upon power_up. */
Michal Kazior32270b62013-08-02 09:15:47 +02001396 ath10k_pci_disable_irqs(ar);
1397
Kalle Valo5e3dd152013-06-12 20:52:10 +03001398 ath10k_pci_stop_ce(ar);
1399
1400 /* At this point, asynchronous threads are stopped, the target should
1401 * not DMA nor interrupt. We process the leftovers and then free
1402 * everything else up. */
1403
1404 ath10k_pci_process_ce(ar);
1405 ath10k_pci_cleanup_ce(ar);
1406 ath10k_pci_buffer_cleanup(ar);
Michal Kazior32270b62013-08-02 09:15:47 +02001407
Michal Kazior6a42a472013-11-08 08:01:35 +01001408 /* Make sure the device won't access any structures on the host by
1409 * resetting it. The device was fed with PCI CE ringbuffer
1410 * configuration during init. If ringbuffers are freed and the device
1411 * were to access them this could lead to memory corruption on the
1412 * host. */
1413 ath10k_pci_device_reset(ar);
1414
Michal Kazior32270b62013-08-02 09:15:47 +02001415 ar_pci->started = 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001416}
1417
1418static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1419 void *req, u32 req_len,
1420 void *resp, u32 *resp_len)
1421{
1422 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior2aa39112013-08-27 13:08:02 +02001423 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1424 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1425 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1426 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001427 dma_addr_t req_paddr = 0;
1428 dma_addr_t resp_paddr = 0;
1429 struct bmi_xfer xfer = {};
1430 void *treq, *tresp = NULL;
1431 int ret = 0;
1432
Michal Kazior85622cd2013-11-25 14:06:22 +01001433 might_sleep();
1434
Kalle Valo5e3dd152013-06-12 20:52:10 +03001435 if (resp && !resp_len)
1436 return -EINVAL;
1437
1438 if (resp && resp_len && *resp_len == 0)
1439 return -EINVAL;
1440
1441 treq = kmemdup(req, req_len, GFP_KERNEL);
1442 if (!treq)
1443 return -ENOMEM;
1444
1445 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1446 ret = dma_mapping_error(ar->dev, req_paddr);
1447 if (ret)
1448 goto err_dma;
1449
1450 if (resp && resp_len) {
1451 tresp = kzalloc(*resp_len, GFP_KERNEL);
1452 if (!tresp) {
1453 ret = -ENOMEM;
1454 goto err_req;
1455 }
1456
1457 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1458 DMA_FROM_DEVICE);
1459 ret = dma_mapping_error(ar->dev, resp_paddr);
1460 if (ret)
1461 goto err_req;
1462
1463 xfer.wait_for_resp = true;
1464 xfer.resp_len = 0;
1465
1466 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1467 }
1468
1469 init_completion(&xfer.done);
1470
1471 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1472 if (ret)
1473 goto err_resp;
1474
Michal Kazior85622cd2013-11-25 14:06:22 +01001475 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1476 if (ret) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001477 u32 unused_buffer;
1478 unsigned int unused_nbytes;
1479 unsigned int unused_id;
1480
Kalle Valo5e3dd152013-06-12 20:52:10 +03001481 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1482 &unused_nbytes, &unused_id);
1483 } else {
1484 /* non-zero means we did not time out */
1485 ret = 0;
1486 }
1487
1488err_resp:
1489 if (resp) {
1490 u32 unused_buffer;
1491
1492 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1493 dma_unmap_single(ar->dev, resp_paddr,
1494 *resp_len, DMA_FROM_DEVICE);
1495 }
1496err_req:
1497 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1498
1499 if (ret == 0 && resp_len) {
1500 *resp_len = min(*resp_len, xfer.resp_len);
 1501 memcpy(resp, tresp, *resp_len);
1502 }
1503err_dma:
1504 kfree(treq);
1505 kfree(tresp);
1506
1507 return ret;
1508}
1509
Michal Kazior5440ce22013-09-03 15:09:58 +02001510static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001511{
Michal Kazior5440ce22013-09-03 15:09:58 +02001512 struct bmi_xfer *xfer;
1513 u32 ce_data;
1514 unsigned int nbytes;
1515 unsigned int transfer_id;
1516
1517 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1518 &nbytes, &transfer_id))
1519 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001520
1521 if (xfer->wait_for_resp)
1522 return;
1523
1524 complete(&xfer->done);
1525}
1526
Michal Kazior5440ce22013-09-03 15:09:58 +02001527static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001528{
Michal Kazior5440ce22013-09-03 15:09:58 +02001529 struct bmi_xfer *xfer;
1530 u32 ce_data;
1531 unsigned int nbytes;
1532 unsigned int transfer_id;
1533 unsigned int flags;
1534
1535 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1536 &nbytes, &transfer_id, &flags))
1537 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001538
1539 if (!xfer->wait_for_resp) {
1540 ath10k_warn("unexpected: BMI data received; ignoring\n");
1541 return;
1542 }
1543
1544 xfer->resp_len = nbytes;
1545 complete(&xfer->done);
1546}
1547
Michal Kazior85622cd2013-11-25 14:06:22 +01001548static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1549 struct ath10k_ce_pipe *rx_pipe,
1550 struct bmi_xfer *xfer)
1551{
1552 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1553
1554 while (time_before_eq(jiffies, timeout)) {
1555 ath10k_pci_bmi_send_done(tx_pipe);
1556 ath10k_pci_bmi_recv_data(rx_pipe);
1557
1558 if (completion_done(&xfer->done))
1559 return 0;
1560
1561 schedule();
1562 }
1563
1564 return -ETIMEDOUT;
1565}
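/*
 * BMI completions are detected by polling the send and receive pipes
 * directly (via the two helpers above) rather than sleeping on CE
 * interrupts; schedule() yields the CPU between polls until
 * BMI_COMMUNICATION_TIMEOUT_HZ elapses.
 */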
1566
Kalle Valo5e3dd152013-06-12 20:52:10 +03001567/*
1568 * Map from service/endpoint to Copy Engine.
1569 * This table is derived from the CE_PCI TABLE, above.
1570 * It is passed to the Target at startup for use by firmware.
1571 */
1572static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1573 {
1574 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1575 PIPEDIR_OUT, /* out = UL = host -> target */
1576 3,
1577 },
1578 {
1579 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1580 PIPEDIR_IN, /* in = DL = target -> host */
1581 2,
1582 },
1583 {
1584 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1585 PIPEDIR_OUT, /* out = UL = host -> target */
1586 3,
1587 },
1588 {
1589 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1590 PIPEDIR_IN, /* in = DL = target -> host */
1591 2,
1592 },
1593 {
1594 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1595 PIPEDIR_OUT, /* out = UL = host -> target */
1596 3,
1597 },
1598 {
1599 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1600 PIPEDIR_IN, /* in = DL = target -> host */
1601 2,
1602 },
1603 {
1604 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1605 PIPEDIR_OUT, /* out = UL = host -> target */
1606 3,
1607 },
1608 {
1609 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1610 PIPEDIR_IN, /* in = DL = target -> host */
1611 2,
1612 },
1613 {
1614 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1615 PIPEDIR_OUT, /* out = UL = host -> target */
1616 3,
1617 },
1618 {
1619 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1620 PIPEDIR_IN, /* in = DL = target -> host */
1621 2,
1622 },
1623 {
1624 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1625 PIPEDIR_OUT, /* out = UL = host -> target */
1626 0, /* could be moved to 3 (share with WMI) */
1627 },
1628 {
1629 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1630 PIPEDIR_IN, /* in = DL = target -> host */
1631 1,
1632 },
1633 {
1634 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1635 PIPEDIR_OUT, /* out = UL = host -> target */
1636 0,
1637 },
1638 {
1639 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1640 PIPEDIR_IN, /* in = DL = target -> host */
1641 1,
1642 },
1643 {
1644 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1645 PIPEDIR_OUT, /* out = UL = host -> target */
1646 4,
1647 },
1648 {
1649 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1650 PIPEDIR_IN, /* in = DL = target -> host */
1651 1,
1652 },
1653
1654 /* (Additions here) */
1655
1656 { /* Must be last */
1657 0,
1658 0,
1659 0,
1660 },
1661};
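/*
 * Downloaded to the target in ath10k_pci_init_config() via the
 * svc_to_pipe_map pointer read from struct pcie_state, so the firmware and
 * ath10k_pci_hif_map_service_to_pipe() share one view of the routing.
 */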
1662
1663/*
1664 * Send an interrupt to the device to wake up the Target CPU
1665 * so it has an opportunity to notice any changed state.
1666 */
1667static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1668{
1669 int ret;
1670 u32 core_ctrl;
1671
1672 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1673 CORE_CTRL_ADDRESS,
1674 &core_ctrl);
1675 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001676 ath10k_warn("failed to read core_ctrl: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001677 return ret;
1678 }
1679
1680 /* A_INUM_FIRMWARE interrupt to Target CPU */
1681 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1682
1683 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1684 CORE_CTRL_ADDRESS,
1685 core_ctrl);
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001686 if (ret) {
1687 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1688 ret);
1689 return ret;
1690 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03001691
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001692 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001693}
1694
1695static int ath10k_pci_init_config(struct ath10k *ar)
1696{
1697 u32 interconnect_targ_addr;
1698 u32 pcie_state_targ_addr = 0;
1699 u32 pipe_cfg_targ_addr = 0;
1700 u32 svc_to_pipe_map = 0;
1701 u32 pcie_config_flags = 0;
1702 u32 ealloc_value;
1703 u32 ealloc_targ_addr;
1704 u32 flag2_value;
1705 u32 flag2_targ_addr;
1706 int ret = 0;
1707
1708 /* Download to Target the CE Config and the service-to-CE map */
1709 interconnect_targ_addr =
1710 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1711
1712 /* Supply Target-side CE configuration */
1713 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1714 &pcie_state_targ_addr);
1715 if (ret != 0) {
1716 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1717 return ret;
1718 }
1719
1720 if (pcie_state_targ_addr == 0) {
1721 ret = -EIO;
1722 ath10k_err("Invalid pcie state addr\n");
1723 return ret;
1724 }
1725
1726 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1727 offsetof(struct pcie_state,
1728 pipe_cfg_addr),
1729 &pipe_cfg_targ_addr);
1730 if (ret != 0) {
1731 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1732 return ret;
1733 }
1734
1735 if (pipe_cfg_targ_addr == 0) {
1736 ret = -EIO;
1737 ath10k_err("Invalid pipe cfg addr\n");
1738 return ret;
1739 }
1740
1741 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1742 target_ce_config_wlan,
1743 sizeof(target_ce_config_wlan));
1744
1745 if (ret != 0) {
1746 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1747 return ret;
1748 }
1749
1750 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1751 offsetof(struct pcie_state,
1752 svc_to_pipe_map),
1753 &svc_to_pipe_map);
1754 if (ret != 0) {
1755 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1756 return ret;
1757 }
1758
1759 if (svc_to_pipe_map == 0) {
1760 ret = -EIO;
1761 ath10k_err("Invalid svc_to_pipe map\n");
1762 return ret;
1763 }
1764
1765 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1766 target_service_to_ce_map_wlan,
1767 sizeof(target_service_to_ce_map_wlan));
1768 if (ret != 0) {
1769 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1770 return ret;
1771 }
1772
1773 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1774 offsetof(struct pcie_state,
1775 config_flags),
1776 &pcie_config_flags);
1777 if (ret != 0) {
1778 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1779 return ret;
1780 }
1781
1782 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1783
1784 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1785 offsetof(struct pcie_state, config_flags),
1786 &pcie_config_flags,
1787 sizeof(pcie_config_flags));
1788 if (ret != 0) {
1789 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1790 return ret;
1791 }
1792
1793 /* configure early allocation */
1794 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1795
1796 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1797 if (ret != 0) {
 1798		ath10k_err("Failed to get early alloc val: %d\n", ret);
1799 return ret;
1800 }
1801
1802 /* first bank is switched to IRAM */
1803 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1804 HI_EARLY_ALLOC_MAGIC_MASK);
1805 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1806 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1807
1808 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1809 if (ret != 0) {
1810 ath10k_err("Failed to set early alloc val: %d\n", ret);
1811 return ret;
1812 }
1813
1814 /* Tell Target to proceed with initialization */
1815 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1816
1817 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1818 if (ret != 0) {
1819 ath10k_err("Failed to get option val: %d\n", ret);
1820 return ret;
1821 }
1822
1823 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1824
1825 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1826 if (ret != 0) {
1827 ath10k_err("Failed to set option val: %d\n", ret);
1828 return ret;
1829 }
1830
1831 return 0;
1832}
1833
1836static int ath10k_pci_ce_init(struct ath10k *ar)
1837{
1838 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior87263e52013-08-27 13:08:01 +02001839 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001840 const struct ce_attr *attr;
1841 int pipe_num;
1842
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001843 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001844 pipe_info = &ar_pci->pipe_info[pipe_num];
1845 pipe_info->pipe_num = pipe_num;
1846 pipe_info->hif_ce_state = ar;
1847 attr = &host_ce_config_wlan[pipe_num];
1848
1849 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1850 if (pipe_info->ce_hdl == NULL) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001851 ath10k_err("failed to initialize CE for pipe: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03001852 pipe_num);
1853
 1854			/* It is safe to call this here; it checks whether ce_hdl is
 1855			 * valid for each pipe. */
1856 ath10k_pci_ce_deinit(ar);
1857 return -1;
1858 }
1859
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001860 if (pipe_num == CE_COUNT - 1) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001861 /*
1862 * Reserve the ultimate CE for
1863 * diagnostic Window support
1864 */
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001865 ar_pci->ce_diag = pipe_info->ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001866 continue;
1867 }
1868
1869 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1870 }
1871
Kalle Valo5e3dd152013-06-12 20:52:10 +03001872 return 0;
1873}
1874
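/*
 * Check the firmware indicator register for a pending firmware event
 * (FW_IND_EVENT_PENDING), ack it, and dump the firmware crash area if the
 * HIF has already been started; otherwise only warn about the early event.
 */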
1875static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1876{
1877 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1878 u32 fw_indicator_address, fw_indicator;
1879
1880 ath10k_pci_wake(ar);
1881
1882 fw_indicator_address = ar_pci->fw_indicator_address;
1883 fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1884
1885 if (fw_indicator & FW_IND_EVENT_PENDING) {
1886 /* ACK: clear Target-side pending event */
1887 ath10k_pci_write32(ar, fw_indicator_address,
1888 fw_indicator & ~FW_IND_EVENT_PENDING);
1889
1890 if (ar_pci->started) {
1891 ath10k_pci_hif_dump_area(ar);
1892 } else {
1893 /*
1894 * Probable Target failure before we're prepared
1895 * to handle it. Generally unexpected.
1896 */
1897 ath10k_warn("early firmware event indicated\n");
1898 }
1899 }
1900
1901 ath10k_pci_sleep(ar);
1902}
1903
Michal Kazior8c5c5362013-07-16 09:38:50 +02001904static int ath10k_pci_hif_power_up(struct ath10k *ar)
1905{
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001906 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo95cbb6a2013-11-20 10:00:35 +02001907 const char *irq_mode;
Michal Kazior8c5c5362013-07-16 09:38:50 +02001908 int ret;
1909
1910 /*
1911 * Bring the target up cleanly.
1912 *
1913 * The target may be in an undefined state with an AUX-powered Target
1914 * and a Host in WoW mode. If the Host crashes, loses power, or is
1915 * restarted (without unloading the driver) then the Target is left
1916 * (aux) powered and running. On a subsequent driver load, the Target
1917 * is in an unexpected state. We try to catch that here in order to
1918 * reset the Target and retry the probe.
1919 */
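	/*
	 * Bring-up order below: cold reset, (optionally) force the SoC awake,
	 * set up the copy engines, wire up interrupts, wait for the firmware
	 * to report FW_IND_INITIALIZED, push the CE/pipe configuration and
	 * finally interrupt the target CPU so it picks up the new state.
	 */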
Michal Kazior5b2589f2013-11-08 08:01:30 +01001920 ret = ath10k_pci_device_reset(ar);
1921 if (ret) {
1922 ath10k_err("failed to reset target: %d\n", ret);
Michal Kazior98563d52013-11-08 08:01:33 +01001923 goto err;
Michal Kazior5b2589f2013-11-08 08:01:30 +01001924 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02001925
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001926 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
Michal Kazior8c5c5362013-07-16 09:38:50 +02001927 /* Force AWAKE forever */
Michal Kazior8c5c5362013-07-16 09:38:50 +02001928 ath10k_do_pci_wake(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001929
1930 ret = ath10k_pci_ce_init(ar);
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001931 if (ret) {
1932 ath10k_err("failed to initialize CE: %d\n", ret);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001933 goto err_ps;
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001934 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02001935
Michal Kazior98563d52013-11-08 08:01:33 +01001936 ret = ath10k_ce_disable_interrupts(ar);
1937 if (ret) {
1938 ath10k_err("failed to disable CE interrupts: %d\n", ret);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001939 goto err_ce;
Michal Kazior98563d52013-11-08 08:01:33 +01001940 }
1941
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001942 ret = ath10k_pci_init_irq(ar);
Michal Kazior98563d52013-11-08 08:01:33 +01001943 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001944 ath10k_err("failed to init irqs: %d\n", ret);
Michal Kazior98563d52013-11-08 08:01:33 +01001945 goto err_ce;
1946 }
1947
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001948 ret = ath10k_pci_request_irq(ar);
1949 if (ret) {
1950 ath10k_err("failed to request irqs: %d\n", ret);
1951 goto err_deinit_irq;
1952 }
1953
Michal Kazior98563d52013-11-08 08:01:33 +01001954 ret = ath10k_pci_wait_for_target_init(ar);
1955 if (ret) {
1956 ath10k_err("failed to wait for target to init: %d\n", ret);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001957 goto err_free_irq;
Michal Kazior98563d52013-11-08 08:01:33 +01001958 }
1959
1960 ret = ath10k_ce_enable_err_irq(ar);
1961 if (ret) {
1962 ath10k_err("failed to enable CE error irq: %d\n", ret);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001963 goto err_free_irq;
Michal Kazior98563d52013-11-08 08:01:33 +01001964 }
1965
1966 ret = ath10k_pci_init_config(ar);
1967 if (ret) {
1968 ath10k_err("failed to setup init config: %d\n", ret);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001969 goto err_free_irq;
Michal Kazior98563d52013-11-08 08:01:33 +01001970 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02001971
1972 ret = ath10k_pci_wake_target_cpu(ar);
1973 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001974 ath10k_err("could not wake up target CPU: %d\n", ret);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001975 goto err_free_irq;
Michal Kazior8c5c5362013-07-16 09:38:50 +02001976 }
1977
Kalle Valo95cbb6a2013-11-20 10:00:35 +02001978 if (ar_pci->num_msi_intrs > 1)
1979 irq_mode = "MSI-X";
1980 else if (ar_pci->num_msi_intrs == 1)
1981 irq_mode = "MSI";
1982 else
1983 irq_mode = "legacy";
1984
Kalle Valo650b91f2013-11-20 10:00:49 +02001985 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
1986 ath10k_info("pci irq %s\n", irq_mode);
Kalle Valo95cbb6a2013-11-20 10:00:35 +02001987
Michal Kazior8c5c5362013-07-16 09:38:50 +02001988 return 0;
1989
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001990err_free_irq:
1991 ath10k_pci_free_irq(ar);
Michal Kazior98563d52013-11-08 08:01:33 +01001992 ath10k_pci_kill_tasklet(ar);
Michal Kazior6a42a472013-11-08 08:01:35 +01001993 ath10k_pci_device_reset(ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01001994err_deinit_irq:
1995 ath10k_pci_deinit_irq(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02001996err_ce:
1997 ath10k_pci_ce_deinit(ar);
1998err_ps:
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02001999 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
Michal Kazior8c5c5362013-07-16 09:38:50 +02002000 ath10k_do_pci_sleep(ar);
2001err:
2002 return ret;
2003}
2004
2005static void ath10k_pci_hif_power_down(struct ath10k *ar)
2006{
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002007 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2008
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002009 ath10k_pci_free_irq(ar);
2010 ath10k_pci_deinit_irq(ar);
Michal Kazior6a42a472013-11-08 08:01:35 +01002011 ath10k_pci_device_reset(ar);
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002012
Michal Kazior8c5c5362013-07-16 09:38:50 +02002013 ath10k_pci_ce_deinit(ar);
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002014 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
Michal Kazior8c5c5362013-07-16 09:38:50 +02002015 ath10k_do_pci_sleep(ar);
2016}
2017
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002018#ifdef CONFIG_PM
2019
2020#define ATH10K_PCI_PM_CONTROL 0x44
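/*
 * ATH10K_PCI_PM_CONTROL is assumed to be the config-space offset of the
 * PCI power management Control/Status register on this chip; the two
 * lowest bits of its first byte select the device power state
 * (0x0 = D0, 0x3 = D3hot), which is what suspend/resume toggle below.
 */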
2021
2022static int ath10k_pci_hif_suspend(struct ath10k *ar)
2023{
2024 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2025 struct pci_dev *pdev = ar_pci->pdev;
2026 u32 val;
2027
2028 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2029
2030 if ((val & 0x000000ff) != 0x3) {
2031 pci_save_state(pdev);
2032 pci_disable_device(pdev);
2033 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2034 (val & 0xffffff00) | 0x03);
2035 }
2036
2037 return 0;
2038}
2039
2040static int ath10k_pci_hif_resume(struct ath10k *ar)
2041{
2042 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2043 struct pci_dev *pdev = ar_pci->pdev;
2044 u32 val;
2045
2046 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2047
2048 if ((val & 0x000000ff) != 0) {
2049 pci_restore_state(pdev);
2050 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2051 val & 0xffffff00);
2052 /*
2053 * Suspend/Resume resets the PCI configuration space,
2054 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2055 * to keep PCI Tx retries from interfering with C3 CPU state
2056 */
2057 pci_read_config_dword(pdev, 0x40, &val);
2058
2059 if ((val & 0x0000ff00) != 0)
2060 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2061 }
2062
2063 return 0;
2064}
2065#endif
2066
Kalle Valo5e3dd152013-06-12 20:52:10 +03002067static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2068 .send_head = ath10k_pci_hif_send_head,
2069 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2070 .start = ath10k_pci_hif_start,
2071 .stop = ath10k_pci_hif_stop,
2072 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2073 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2074 .send_complete_check = ath10k_pci_hif_send_complete_check,
Michal Kaziore799bbf2013-07-05 16:15:12 +03002075 .set_callbacks = ath10k_pci_hif_set_callbacks,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002076 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
Michal Kazior8c5c5362013-07-16 09:38:50 +02002077 .power_up = ath10k_pci_hif_power_up,
2078 .power_down = ath10k_pci_hif_power_down,
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002079#ifdef CONFIG_PM
2080 .suspend = ath10k_pci_hif_suspend,
2081 .resume = ath10k_pci_hif_resume,
2082#endif
Kalle Valo5e3dd152013-06-12 20:52:10 +03002083};
2084
2085static void ath10k_pci_ce_tasklet(unsigned long ptr)
2086{
Michal Kazior87263e52013-08-27 13:08:01 +02002087 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002088 struct ath10k_pci *ar_pci = pipe->ar_pci;
2089
2090 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2091}
2092
2093static void ath10k_msi_err_tasklet(unsigned long data)
2094{
2095 struct ath10k *ar = (struct ath10k *)data;
2096
2097 ath10k_pci_fw_interrupt_handler(ar);
2098}
2099
2100/*
2101 * Handler for a per-engine interrupt on a PARTICULAR CE.
2102 * This is used in cases where each CE has a private MSI interrupt.
2103 */
2104static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2105{
2106 struct ath10k *ar = arg;
2107 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2108 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2109
Dan Carpentere5742672013-06-18 10:28:46 +03002110 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002111 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2112 return IRQ_HANDLED;
2113 }
2114
2115 /*
2116 * NOTE: We are able to derive ce_id from irq because we
 2117	 * use a one-to-one mapping for CEs 0..5.
 2118	 * CEs 6 & 7 do not use interrupts at all.
2119 *
2120 * This mapping must be kept in sync with the mapping
2121 * used by firmware.
2122 */
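	/*
	 * Worked example (assuming MSI_ASSIGN_FW is 0 and
	 * MSI_ASSIGN_CE_INITIAL is 1): with a block of MSI vectors starting
	 * at pdev->irq, vector pdev->irq + 0 is the firmware error interrupt
	 * and vector pdev->irq + 3 lands here with ce_id == 2.
	 */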
2123 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2124 return IRQ_HANDLED;
2125}
2126
2127static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2128{
2129 struct ath10k *ar = arg;
2130 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2131
2132 tasklet_schedule(&ar_pci->msi_fw_err);
2133 return IRQ_HANDLED;
2134}
2135
2136/*
2137 * Top-level interrupt handler for all PCI interrupts from a Target.
2138 * When a block of MSI interrupts is allocated, this top-level handler
2139 * is not used; instead, we directly call the correct sub-handler.
2140 */
2141static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2142{
2143 struct ath10k *ar = arg;
2144 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2145
2146 if (ar_pci->num_msi_intrs == 0) {
Michal Kaziore5398872013-11-25 14:06:20 +01002147 if (!ath10k_pci_irq_pending(ar))
2148 return IRQ_NONE;
2149
Kalle Valo5e3dd152013-06-12 20:52:10 +03002150 /*
 2151		 * IMPORTANT: INTR_CLR register has to be set after
 2152		 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
 2153		 * really cleared.
2154 */
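		/*
		 * Legacy interrupts stay masked from here until
		 * ath10k_pci_tasklet() re-enables them after servicing the
		 * copy engines.
		 */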
2155 iowrite32(0, ar_pci->mem +
2156 (SOC_CORE_BASE_ADDRESS |
2157 PCIE_INTR_ENABLE_ADDRESS));
2158 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2159 PCIE_INTR_CE_MASK_ALL,
2160 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2161 PCIE_INTR_CLR_ADDRESS));
2162 /*
2163 * IMPORTANT: this extra read transaction is required to
2164 * flush the posted write buffer.
2165 */
2166 (void) ioread32(ar_pci->mem +
2167 (SOC_CORE_BASE_ADDRESS |
2168 PCIE_INTR_ENABLE_ADDRESS));
2169 }
2170
2171 tasklet_schedule(&ar_pci->intr_tq);
2172
2173 return IRQ_HANDLED;
2174}
2175
2176static void ath10k_pci_tasklet(unsigned long data)
2177{
2178 struct ath10k *ar = (struct ath10k *)data;
2179 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2180
2181 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2182 ath10k_ce_per_engine_service_any(ar);
2183
2184 if (ar_pci->num_msi_intrs == 0) {
2185 /* Enable Legacy PCI line interrupts */
2186 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2187 PCIE_INTR_CE_MASK_ALL,
2188 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2189 PCIE_INTR_ENABLE_ADDRESS));
2190 /*
2191 * IMPORTANT: this extra read transaction is required to
2192 * flush the posted write buffer
2193 */
2194 (void) ioread32(ar_pci->mem +
2195 (SOC_CORE_BASE_ADDRESS |
2196 PCIE_INTR_ENABLE_ADDRESS));
2197 }
2198}
2199
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002200static int ath10k_pci_request_irq_msix(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002201{
2202 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002203 int ret, i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002204
2205 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2206 ath10k_pci_msi_fw_handler,
2207 IRQF_SHARED, "ath10k_pci", ar);
Michal Kazior591ecdb2013-07-31 10:55:15 +02002208 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002209 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
Michal Kazior591ecdb2013-07-31 10:55:15 +02002210 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002211 return ret;
Michal Kazior591ecdb2013-07-31 10:55:15 +02002212 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002213
2214 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2215 ret = request_irq(ar_pci->pdev->irq + i,
2216 ath10k_pci_per_engine_handler,
2217 IRQF_SHARED, "ath10k_pci", ar);
2218 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002219 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002220 ar_pci->pdev->irq + i, ret);
2221
Michal Kazior87b14232013-06-26 08:50:50 +02002222 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2223 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002224
Michal Kazior87b14232013-06-26 08:50:50 +02002225 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002226 return ret;
2227 }
2228 }
2229
Kalle Valo5e3dd152013-06-12 20:52:10 +03002230 return 0;
2231}
2232
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002233static int ath10k_pci_request_irq_msi(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002234{
2235 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2236 int ret;
2237
2238 ret = request_irq(ar_pci->pdev->irq,
2239 ath10k_pci_interrupt_handler,
2240 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002241 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002242 ath10k_warn("failed to request MSI irq %d: %d\n",
2243 ar_pci->pdev->irq, ret);
Kalle Valof3782742013-10-17 11:36:15 +03002244 return ret;
2245 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002246
Kalle Valo5e3dd152013-06-12 20:52:10 +03002247 return 0;
2248}
2249
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002250static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002251{
2252 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002253 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002254
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002255 ret = request_irq(ar_pci->pdev->irq,
2256 ath10k_pci_interrupt_handler,
2257 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002258 if (ret) {
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002259 ath10k_warn("failed to request legacy irq %d: %d\n",
2260 ar_pci->pdev->irq, ret);
Kalle Valof3782742013-10-17 11:36:15 +03002261 return ret;
2262 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002263
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002264 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002265}
2266
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002267static int ath10k_pci_request_irq(struct ath10k *ar)
2268{
2269 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2270
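	/*
	 * num_msi_intrs reflects what ath10k_pci_init_irq() managed to
	 * enable: 0 = shared legacy INTx line, 1 = a single MSI,
	 * MSI_NUM_REQUEST = a block of MSI vectors (one for firmware errors
	 * plus one per interrupt-capable CE), reported as "MSI-X" here.
	 */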
2271 switch (ar_pci->num_msi_intrs) {
2272 case 0:
2273 return ath10k_pci_request_irq_legacy(ar);
2274 case 1:
2275 return ath10k_pci_request_irq_msi(ar);
2276 case MSI_NUM_REQUEST:
2277 return ath10k_pci_request_irq_msix(ar);
2278 }
2279
2280 ath10k_warn("unknown irq configuration upon request\n");
2281 return -EINVAL;
2282}
2283
2284static void ath10k_pci_free_irq(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002285{
2286 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2287 int i;
2288
 2289	/* There's at least one interrupt regardless of whether it's legacy INTR,
 2290	 * MSI or MSI-X */
2291 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2292 free_irq(ar_pci->pdev->irq + i, ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002293}
Kalle Valo5e3dd152013-06-12 20:52:10 +03002294
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002295static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2296{
2297 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2298 int i;
2299
2300 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2301 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2302 (unsigned long)ar);
2303
2304 for (i = 0; i < CE_COUNT; i++) {
2305 ar_pci->pipe_info[i].ar_pci = ar_pci;
2306 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2307 (unsigned long)&ar_pci->pipe_info[i]);
2308 }
2309}
2310
2311static int ath10k_pci_init_irq(struct ath10k *ar)
2312{
2313 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2314 int ret;
2315
2316 ath10k_pci_init_irq_tasklets(ar);
2317
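	/*
	 * Preferred order: a full MSI vector block, then a single MSI,
	 * then the shared legacy INTx line as the last resort.
	 */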
2318 if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2319 goto msi;
2320
2321 /* Try MSI-X */
2322 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2323 ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
2324 if (ret == 0)
2325 return 0;
2326 if (ret > 0)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002327 pci_disable_msi(ar_pci->pdev);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002328
2329msi:
2330 /* Try MSI */
2331 ar_pci->num_msi_intrs = 1;
2332 ret = pci_enable_msi(ar_pci->pdev);
2333 if (ret == 0)
2334 return 0;
2335
2336 /* Try legacy irq
2337 *
2338 * A potential race occurs here: The CORE_BASE write
2339 * depends on target correctly decoding AXI address but
2340 * host won't know when target writes BAR to CORE_CTRL.
2341 * This write might get lost if target has NOT written BAR.
 2342	 * For now, work around the race by repeating the write in the
 2343	 * synchronization check below (see ath10k_pci_wait_for_target_init()). */
2344 ar_pci->num_msi_intrs = 0;
2345
2346 ret = ath10k_pci_wake(ar);
2347 if (ret) {
2348 ath10k_warn("failed to wake target: %d\n", ret);
2349 return ret;
2350 }
2351
2352 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2353 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2354 ath10k_pci_sleep(ar);
2355
2356 return 0;
2357}
2358
2359static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2360{
2361 int ret;
2362
2363 ret = ath10k_pci_wake(ar);
2364 if (ret) {
2365 ath10k_warn("failed to wake target: %d\n", ret);
2366 return ret;
2367 }
2368
2369 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2370 0);
2371 ath10k_pci_sleep(ar);
2372
2373 return 0;
2374}
2375
2376static int ath10k_pci_deinit_irq(struct ath10k *ar)
2377{
2378 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2379
2380 switch (ar_pci->num_msi_intrs) {
2381 case 0:
2382 return ath10k_pci_deinit_irq_legacy(ar);
2383 case 1:
2384 /* fall-through */
2385 case MSI_NUM_REQUEST:
2386 pci_disable_msi(ar_pci->pdev);
2387 return 0;
2388 }
2389
2390 ath10k_warn("unknown irq configuration upon deinit\n");
2391 return -EINVAL;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002392}
2393
Michal Kaziord7fb47f2013-11-08 08:01:26 +01002394static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002395{
2396 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2397 int wait_limit = 300; /* 3 sec */
Kalle Valof3782742013-10-17 11:36:15 +03002398 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002399
Michal Kazior98563d52013-11-08 08:01:33 +01002400 ret = ath10k_pci_wake(ar);
Kalle Valof3782742013-10-17 11:36:15 +03002401 if (ret) {
Michal Kazior5b2589f2013-11-08 08:01:30 +01002402 ath10k_err("failed to wake up target: %d\n", ret);
Kalle Valof3782742013-10-17 11:36:15 +03002403 return ret;
2404 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002405
2406 while (wait_limit-- &&
2407 !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2408 FW_IND_INITIALIZED)) {
2409 if (ar_pci->num_msi_intrs == 0)
2410 /* Fix potential race by repeating CORE_BASE writes */
2411 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2412 PCIE_INTR_CE_MASK_ALL,
2413 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2414 PCIE_INTR_ENABLE_ADDRESS));
2415 mdelay(10);
2416 }
2417
2418 if (wait_limit < 0) {
Michal Kazior5b2589f2013-11-08 08:01:30 +01002419 ath10k_err("target stalled\n");
2420 ret = -EIO;
2421 goto out;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002422 }
2423
Michal Kazior5b2589f2013-11-08 08:01:30 +01002424out:
Michal Kazior98563d52013-11-08 08:01:33 +01002425 ath10k_pci_sleep(ar);
Michal Kazior5b2589f2013-11-08 08:01:30 +01002426 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002427}
2428
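/*
 * Cold-reset the target SoC (including the PCIe core) by setting bit 0 of
 * SOC_GLOBAL_RESET and polling RTC_STATE until the cold-reset flag is seen
 * to assert, then clearing the bit and polling until the flag deasserts.
 */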
Michal Kazior5b2589f2013-11-08 08:01:30 +01002429static int ath10k_pci_device_reset(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002430{
Michal Kazior5b2589f2013-11-08 08:01:30 +01002431 int i, ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002432 u32 val;
2433
Michal Kazior5b2589f2013-11-08 08:01:30 +01002434 ret = ath10k_do_pci_wake(ar);
2435 if (ret) {
2436 ath10k_err("failed to wake up target: %d\n",
2437 ret);
2438 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002439 }
2440
2441 /* Put Target, including PCIe, into RESET. */
Kalle Valoe479ed42013-09-01 10:01:53 +03002442 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002443 val |= 1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002444 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002445
2446 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002447 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002448 RTC_STATE_COLD_RESET_MASK)
2449 break;
2450 msleep(1);
2451 }
2452
2453 /* Pull Target, including PCIe, out of RESET. */
2454 val &= ~1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002455 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002456
2457 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002458 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002459 RTC_STATE_COLD_RESET_MASK))
2460 break;
2461 msleep(1);
2462 }
2463
Michal Kazior5b2589f2013-11-08 08:01:30 +01002464 ath10k_do_pci_sleep(ar);
2465 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002466}
2467
2468static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2469{
2470 int i;
2471
2472 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2473 if (!test_bit(i, ar_pci->features))
2474 continue;
2475
2476 switch (i) {
2477 case ATH10K_PCI_FEATURE_MSI_X:
Kalle Valo24cfade2013-09-08 17:55:50 +03002478 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002479 break;
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002480 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
Kalle Valo24cfade2013-09-08 17:55:50 +03002481 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002482 break;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002483 }
2484 }
2485}
2486
2487static int ath10k_pci_probe(struct pci_dev *pdev,
2488 const struct pci_device_id *pci_dev)
2489{
2490 void __iomem *mem;
2491 int ret = 0;
2492 struct ath10k *ar;
2493 struct ath10k_pci *ar_pci;
Kalle Valoe01ae682013-09-01 11:22:14 +03002494 u32 lcr_val, chip_id;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002495
2496 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2497
2498 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2499 if (ar_pci == NULL)
2500 return -ENOMEM;
2501
2502 ar_pci->pdev = pdev;
2503 ar_pci->dev = &pdev->dev;
2504
2505 switch (pci_dev->device) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03002506 case QCA988X_2_0_DEVICE_ID:
2507 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2508 break;
2509 default:
2510 ret = -ENODEV;
 2511		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2512 goto err_ar_pci;
2513 }
2514
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002515 if (ath10k_target_ps)
2516 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2517
Kalle Valo5e3dd152013-06-12 20:52:10 +03002518 ath10k_pci_dump_features(ar_pci);
2519
Michal Kazior3a0861f2013-07-05 16:15:06 +03002520 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002521 if (!ar) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002522 ath10k_err("failed to create driver core\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002523 ret = -EINVAL;
2524 goto err_ar_pci;
2525 }
2526
Kalle Valo5e3dd152013-06-12 20:52:10 +03002527 ar_pci->ar = ar;
2528 ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2529 atomic_set(&ar_pci->keep_awake_count, 0);
2530
2531 pci_set_drvdata(pdev, ar);
2532
2533 /*
2534 * Without any knowledge of the Host, the Target may have been reset or
2535 * power cycled and its Config Space may no longer reflect the PCI
2536 * address space that was assigned earlier by the PCI infrastructure.
2537 * Refresh it now.
2538 */
2539 ret = pci_assign_resource(pdev, BAR_NUM);
2540 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002541 ath10k_err("failed to assign PCI space: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002542 goto err_ar;
2543 }
2544
2545 ret = pci_enable_device(pdev);
2546 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002547 ath10k_err("failed to enable PCI device: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002548 goto err_ar;
2549 }
2550
2551 /* Request MMIO resources */
2552 ret = pci_request_region(pdev, BAR_NUM, "ath");
2553 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002554 ath10k_err("failed to request MMIO region: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002555 goto err_device;
2556 }
2557
2558 /*
2559 * Target structures have a limit of 32 bit DMA pointers.
2560 * DMA pointers can be wider than 32 bits by default on some systems.
2561 */
2562 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2563 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002564 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002565 goto err_region;
2566 }
2567
2568 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2569 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002570 ath10k_err("failed to set consistent DMA mask to 32-bit\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002571 goto err_region;
2572 }
2573
2574 /* Set bus master bit in PCI_COMMAND to enable DMA */
2575 pci_set_master(pdev);
2576
2577 /*
2578 * Temporary FIX: disable ASPM
2579 * Will be removed after the OTP is programmed
2580 */
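	/*
	 * Offset 0x80 is assumed to be the PCIe Link Control register for
	 * this device; clearing its low byte clears (among other bits) the
	 * ASPM L0s/L1 enable field.
	 */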
2581 pci_read_config_dword(pdev, 0x80, &lcr_val);
2582 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2583
2584 /* Arrange for access to Target SoC registers. */
2585 mem = pci_iomap(pdev, BAR_NUM, 0);
2586 if (!mem) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002587 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002588 ret = -EIO;
2589 goto err_master;
2590 }
2591
2592 ar_pci->mem = mem;
2593
2594 spin_lock_init(&ar_pci->ce_lock);
2595
Kalle Valoe01ae682013-09-01 11:22:14 +03002596 ret = ath10k_do_pci_wake(ar);
2597 if (ret) {
2598 ath10k_err("Failed to get chip id: %d\n", ret);
Wei Yongjun12eb0872013-10-30 13:24:39 +08002599 goto err_iomap;
Kalle Valoe01ae682013-09-01 11:22:14 +03002600 }
2601
Kalle Valo233eb972013-10-16 16:46:11 +03002602 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
Kalle Valoe01ae682013-09-01 11:22:14 +03002603
2604 ath10k_do_pci_sleep(ar);
2605
Kalle Valo24cfade2013-09-08 17:55:50 +03002606 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2607
Kalle Valoe01ae682013-09-01 11:22:14 +03002608 ret = ath10k_core_register(ar, chip_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002609 if (ret) {
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002610 ath10k_err("failed to register driver core: %d\n", ret);
Michal Kazior32270b62013-08-02 09:15:47 +02002611 goto err_iomap;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002612 }
2613
2614 return 0;
2615
Kalle Valo5e3dd152013-06-12 20:52:10 +03002616err_iomap:
2617 pci_iounmap(pdev, mem);
2618err_master:
2619 pci_clear_master(pdev);
2620err_region:
2621 pci_release_region(pdev, BAR_NUM);
2622err_device:
2623 pci_disable_device(pdev);
2624err_ar:
Kalle Valo5e3dd152013-06-12 20:52:10 +03002625 ath10k_core_destroy(ar);
2626err_ar_pci:
2627 /* call HIF PCI free here */
2628 kfree(ar_pci);
2629
2630 return ret;
2631}
2632
2633static void ath10k_pci_remove(struct pci_dev *pdev)
2634{
2635 struct ath10k *ar = pci_get_drvdata(pdev);
2636 struct ath10k_pci *ar_pci;
2637
2638 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2639
2640 if (!ar)
2641 return;
2642
2643 ar_pci = ath10k_pci_priv(ar);
2644
2645 if (!ar_pci)
2646 return;
2647
2648 tasklet_kill(&ar_pci->msi_fw_err);
2649
2650 ath10k_core_unregister(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002651
Kalle Valo5e3dd152013-06-12 20:52:10 +03002652 pci_iounmap(pdev, ar_pci->mem);
2653 pci_release_region(pdev, BAR_NUM);
2654 pci_clear_master(pdev);
2655 pci_disable_device(pdev);
2656
2657 ath10k_core_destroy(ar);
2658 kfree(ar_pci);
2659}
2660
Kalle Valo5e3dd152013-06-12 20:52:10 +03002661MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2662
2663static struct pci_driver ath10k_pci_driver = {
2664 .name = "ath10k_pci",
2665 .id_table = ath10k_pci_id_table,
2666 .probe = ath10k_pci_probe,
2667 .remove = ath10k_pci_remove,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002668};
2669
2670static int __init ath10k_pci_init(void)
2671{
2672 int ret;
2673
2674 ret = pci_register_driver(&ath10k_pci_driver);
2675 if (ret)
Michal Kazior1d2b48d2013-11-08 08:01:34 +01002676 ath10k_err("failed to register PCI driver: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002677
2678 return ret;
2679}
2680module_init(ath10k_pci_init);
2681
2682static void __exit ath10k_pci_exit(void)
2683{
2684 pci_unregister_driver(&ath10k_pci_driver);
2685}
2686
2687module_exit(ath10k_pci_exit);
2688
2689MODULE_AUTHOR("Qualcomm Atheros");
2690MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2691MODULE_LICENSE("Dual BSD/GPL");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002692MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2693MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2694MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);