/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low-priority channels,
 * management packets through the high-priority ones.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"

static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

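/*
 * Each DXE channel is driven through a circular list of control blocks:
 * the last block's ->next points back at the head, so the driver can walk
 * the ring with its head/tail pointers without ever resetting an index.
 */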
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		spin_lock_init(&cur_ctl->skb_lock);
		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (i == ch->desc_num - 1) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear WLAN_TX_ENABLE, set WLAN_TX_RINGS_EMPTY */
	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
					  WCN36XX_SMSM_WLAN_TX_ENABLE |
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	if (ret)
		goto out_err;

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

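/*
 * Allocate the hardware descriptor ring from DMA-coherent memory and tie
 * each descriptor to its control block. Every descriptor's phy_next_l
 * holds the bus address of its successor, and the last one points back to
 * the head, mirroring the circular control-block list above.
 */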
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if ((i > 0) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	size_t size;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
}

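/*
 * A TX transfer consumes a pair of descriptors: the first moves the buffer
 * descriptor (BD) header, the second the skb payload. Hence only every
 * even control block gets a BD chunk from the pool; the odd ones carry the
 * skb mapping instead.
 */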
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer,
		 * the others will point at the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

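/* Unmask one channel's interrupt via read-modify-write of the mask register. */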
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

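/*
 * Arm a descriptor for receive: allocate a maximum-sized skb and DMA-map
 * its data area so the device can write the next incoming frame directly
 * into it.
 */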
static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

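/*
 * Walk the TX ring from the tail and release descriptors the hardware has
 * completed. A set VLD bit in desc->ctrl apparently means the descriptor
 * is still owned by the DXE engine, so we stop at the first one we meet.
 */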
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Use a do-while so we make at least one pass: when the ring is
	 * completely full, head and tail point to the same element and a
	 * plain while loop would not iterate at all.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD)
			break;
		if (ctl->skb) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No TX status was requested, so the frame
				 * can be freed here; otherwise it is kept
				 * until the TX status indication arrives.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}
			spin_lock(&ctl->skb_lock);
			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}
			spin_unlock(&ctl->skb_lock);

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl &&
		 !(ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD));

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);

			wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
				    int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);

			wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
				    int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	disable_irq_nosync(wcn->rx_irq);
	wcn36xx_dxe_rx_frame(wcn);
	enable_irq(wcn->rx_irq);
	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

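/*
 * Drain one RX channel. For every descriptor the hardware has handed back
 * (VLD cleared), a replacement buffer is attached first; only if that
 * succeeds is the old skb unmapped and passed up the stack. This way the
 * ring never loses a buffer under memory pressure.
 */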
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int ret = 0, int_mask;
	u32 value;

	if (ch->ch_type == WCN36XX_DXE_CH_RX_L) {
		value = WCN36XX_DXE_CTRL_RX_L;
		int_mask = WCN36XX_DXE_INT_CH1_MASK;
	} else {
		value = WCN36XX_DXE_CTRL_RX_H;
		int_mask = WCN36XX_DXE_INT_CH3_MASK;
	}

	while (!(dxe->ctrl & WCN36xx_DXE_CTRL_VLD)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
		if (!ret) {
			/* new skb allocation ok. Use the new one and queue
			 * the old one to network system.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		} /* else keep old skb not submitted and use it for rx DMA */

		dxe->ctrl = value;
		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask);

	ch->head_blk_ctl = ctl;
	return 0;
}

void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH1_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
	}

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
		/* Clean up all the INT within this channel */
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH3_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
	}

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

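/*
 * The BD headers prepended to TX frames come from two preallocated
 * DMA-coherent pools, one per TX channel, each sized for that channel's
 * descriptor count. The chunk size is padded, presumably for 8-byte
 * alignment; the original comment defers the exact value to QC.
 */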
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr) {
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
	}
}

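/*
 * Queue one frame for transmission, consuming two consecutive ring slots:
 * the first descriptor sends the BD header from the per-channel pool, the
 * second sends the skb itself. If the chip is in BMPS power save, a plain
 * register kick cannot wake it, so the new frame is signalled over the
 * SMSM state bits instead.
 */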
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct wcn36xx_tx_bd *bd,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->head_blk_ctl;

	spin_lock(&ctl->next->skb_lock);

	/*
	 * If the skb is not NULL we have caught up with the tail of the
	 * ring, i.e. the ring is full. Stop the queues to let mac80211
	 * back off until a slot is free again.
	 */
	if (NULL != ctl->next->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock(&ctl->next->skb_lock);
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}
	spin_unlock(&ctl->next->skb_lock);

	ctl->skb = NULL;
	desc = ctl->desc;

	/* write buffer descriptor */
	memcpy(ctl->bd_cpu_addr, bd, sizeof(*bd));

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Set source address of the SKB we send */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	if (ctl->bd_cpu_addr) {
		wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc->src_addr_l = dma_map_single(wcn->dev,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(wcn->dev, desc->src_addr_l)) {
		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
		ret = -ENOMEM;
		goto unlock;
	}

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* set dxe descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When connected, the chip can be asleep and a register write while
	 * trying to send a data frame will not wake it up. Notify the chip
	 * about the new frame through the SMSM bus instead.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
					    WCN36XX_SMSM_WLAN_TX_ENABLE,
					    WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* indicate End Of Packet and generate interrupt on descriptor
		 * done.
		 */
		wcn36xx_dxe_write_register(wcn,
			ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}

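/*
 * Bring up the DXE engine: reset it, route the per-channel interrupts
 * (Riva and Pronto use different CCU select registers), program each of
 * the four channels with its ring head, work-queue address and default
 * control bits, and finally hook up the TX and RX interrupt handlers.
 */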
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		return ret;
	}
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_txh_ch;
	}

	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxl_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxh_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err_irq;

	return 0;

out_err_irq:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
out_err_rxh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
out_err_rxl_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
out_err_txh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);

	return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}