/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

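/*
 * Data bits carried by one OFDM symbol for MCS 0-7 (single spatial stream)
 * in 20 MHz and 40 MHz channels.  Callers index the table with
 * bits_per_symbol[rix % 8][width] and multiply by the stream count, so only
 * the first eight MCS entries are needed here.
 */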
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

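/*
 * Per-MCS (0-31) upper bound, in bytes, on the A-MPDU size that keeps the
 * transmit duration around 4 ms; entries are capped at 65532 because the
 * hardware length field is 16 bits.  Rough sanity check: MCS 0 at 20 MHz is
 * 6.5 Mbit/s, and 6.5e6 * 4 ms / 8 is about 3250 bytes, matching the
 * 3212-byte entry.  Used by ath_lookup_rate() below.
 */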
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864, 19300, 25736, 28952, 32172,
		6424,  12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628,  19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296, 21444, 28596, 32172, 35744,
		7140,  14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

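/*
 * Block-ack window (BAW) tracking: tid->tx_buf is a bitmap of outstanding
 * frames indexed relative to tid->seq_start, modulo ATH_TID_MAX_BUFS.
 * ath_tx_update_baw() clears the bit for a completed frame and slides
 * seq_start/baw_head forward past any leading completed slots;
 * ath_tx_addto_baw() sets the bit when a frame enters the window.
 * Example: with seq_start == 100, a completed seqno of 102 clears the slot
 * at offset 2 from baw_head.
 */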
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

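/*
 * Tx status handling for an aggregate: subframes covered by the block-ack
 * bitmap are completed as acked, the rest are either software-retried
 * (re-queued on the TID, preserving order) or failed once the retry limit
 * is reached, and the block-ack window is advanced for every subframe that
 * will not be retransmitted.
 */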
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA issue
			 * happens and the chip needs to be reset, but the
			 * AP code may have synchronization issues when
			 * performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it is not a
		 * holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer on the temporary pending
			 * queue to retain ordering.
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates.
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiters when using RTS/CTS with aggregation
	 * on a non-enterprise AR9003 card.
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. the first rate) to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

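/*
 * Illustrative example of the MPDU-density padding computed by
 * ath_compute_num_delims() above (assuming a 4-byte MPDU delimiter):
 * with an mpdudensity of 8 us and MCS 7 at 20 MHz, full GI, the peer needs
 * NUM_SYMBOLS_PER_USEC(8) = 2 symbols * 260 bits / 8 = 65 bytes between
 * MPDU starts, so a shorter subframe gets (65 - frmlen) / ATH_AGGR_DELIM_SZ
 * extra delimiters on top of the length-based minimum.
 */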
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use a 4 us vs 3.6 us symbol time
 */
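/*
 * Example (illustrative): a 1500-byte MPDU at MCS 7, 20 MHz, full GI uses
 * one stream, nbits = 1500 * 8 + 22 = 12022, nsymbits = 260, so 47 symbols
 * = 188 us of data, plus 36 us of training and signal fields
 * (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1)) = 224 us total.
 */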
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up the duration of the legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

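/*
 * Fill the hardware descriptors for a frame or an aggregate: one
 * ath_tx_info is built with the rate series and per-buffer fields
 * (address, length, key, aggregate first/middle/last marking and delimiter
 * count) and written out via ath9k_hw_set_txdesc() for each buffer in the
 * bf_next chain, with info.link pointing at the next descriptor.
 */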
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

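/*
 * ADDBA (aggregation session) setup: the TID is paused and its block-ack
 * window reset while the handshake is in progress; *ssn reports the
 * starting sequence number to mac80211.  Transmission resumes from
 * ath_tx_aggr_resume() once the session is established.
 */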
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

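/*
 * Powersave transitions: ath_tx_aggr_sleep() detaches all of a station's
 * TIDs (and their access categories) from the txq scheduler and tells
 * mac80211 whether frames remain buffered; ath_tx_aggr_wakeup() re-queues
 * any TIDs that still hold frames and kicks the scheduler again.
 */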
Johannes Berg042ec452011-09-29 16:04:26 +02001158void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1159 struct ath_node *an)
Felix Fietkau55195412011-04-17 23:28:09 +02001160{
1161 struct ath_atx_tid *tid;
1162 struct ath_atx_ac *ac;
1163 struct ath_txq *txq;
Johannes Berg042ec452011-09-29 16:04:26 +02001164 bool buffered;
Felix Fietkau55195412011-04-17 23:28:09 +02001165 int tidno;
1166
1167 for (tidno = 0, tid = &an->tid[tidno];
1168 tidno < WME_NUM_TID; tidno++, tid++) {
1169
1170 if (!tid->sched)
1171 continue;
1172
1173 ac = tid->ac;
1174 txq = ac->txq;
1175
1176 spin_lock_bh(&txq->axq_lock);
1177
Johannes Berg042ec452011-09-29 16:04:26 +02001178 buffered = !skb_queue_empty(&tid->buf_q);
Felix Fietkau55195412011-04-17 23:28:09 +02001179
1180 tid->sched = false;
1181 list_del(&tid->list);
1182
1183 if (ac->sched) {
1184 ac->sched = false;
1185 list_del(&ac->list);
1186 }
1187
1188 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau55195412011-04-17 23:28:09 +02001189
Johannes Berg042ec452011-09-29 16:04:26 +02001190 ieee80211_sta_set_buffered(sta, tidno, buffered);
1191 }
Felix Fietkau55195412011-04-17 23:28:09 +02001192}
1193
1194void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1195{
1196 struct ath_atx_tid *tid;
1197 struct ath_atx_ac *ac;
1198 struct ath_txq *txq;
1199 int tidno;
1200
1201 for (tidno = 0, tid = &an->tid[tidno];
1202 tidno < WME_NUM_TID; tidno++, tid++) {
1203
1204 ac = tid->ac;
1205 txq = ac->txq;
1206
1207 spin_lock_bh(&txq->axq_lock);
1208 ac->clear_ps_filter = true;
1209
Felix Fietkau56dc6332011-08-28 00:32:22 +02001210 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001211 ath_tx_queue_tid(txq, tid);
1212 ath_txq_schedule(sc, txq);
1213 }
1214
1215 spin_unlock_bh(&txq->axq_lock);
1216 }
1217}
1218
Sujithe8324352009-01-16 21:38:42 +05301219void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1220{
1221 struct ath_atx_tid *txtid;
1222 struct ath_node *an;
1223
1224 an = (struct ath_node *)sta->drv_priv;
1225
1226 if (sc->sc_flags & SC_OP_TXAGGR) {
1227 txtid = ATH_AN_2_TID(an, tid);
1228 txtid->baw_size =
1229 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1230 txtid->state |= AGGR_ADDBA_COMPLETE;
1231 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1232 ath_tx_resume_tid(sc, txtid);
1233 }
1234}
1235
Sujithe8324352009-01-16 21:38:42 +05301236/********************/
1237/* Queue Management */
1238/********************/
1239
Sujithe8324352009-01-16 21:38:42 +05301240static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1241 struct ath_txq *txq)
1242{
1243 struct ath_atx_ac *ac, *ac_tmp;
1244 struct ath_atx_tid *tid, *tid_tmp;
1245
1246 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1247 list_del(&ac->list);
1248 ac->sched = false;
1249 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1250 list_del(&tid->list);
1251 tid->sched = false;
1252 ath_tid_drain(sc, txq, tid);
1253 }
1254 }
1255}
1256
1257struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1258{
Sujithcbe61d82009-02-09 13:27:12 +05301259 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301260 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001261 static const int subtype_txq_to_hwq[] = {
1262 [WME_AC_BE] = ATH_TXQ_AC_BE,
1263 [WME_AC_BK] = ATH_TXQ_AC_BK,
1264 [WME_AC_VI] = ATH_TXQ_AC_VI,
1265 [WME_AC_VO] = ATH_TXQ_AC_VO,
1266 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001267 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301268
1269 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001270 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301271 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1272 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1273 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1274 qi.tqi_physCompBuf = 0;
1275
1276 /*
1277 * Enable interrupts only for EOL and DESC conditions.
1278 * We mark tx descriptors to receive a DESC interrupt
                         1279 * when a tx queue gets deep; otherwise we wait for the
1280 * EOL to reap descriptors. Note that this is done to
1281 * reduce interrupt load and this only defers reaping
1282 * descriptors, never transmitting frames. Aside from
1283 * reducing interrupts this also permits more concurrency.
1284 * The only potential downside is if the tx queue backs
                         1285 * up, in which case the top half of the kernel may back up
1286 * due to a lack of tx descriptors.
1287 *
1288 * The UAPSD queue is an exception, since we take a desc-
1289 * based intr on the EOSP frames.
1290 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001291 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1292 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1293 TXQ_FLAG_TXERRINT_ENABLE;
1294 } else {
1295 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1296 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1297 else
1298 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1299 TXQ_FLAG_TXDESCINT_ENABLE;
1300 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001301 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1302 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301303 /*
1304 * NB: don't print a message, this happens
1305 * normally on parts with too few tx queues
1306 */
1307 return NULL;
1308 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001309 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1310 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301311
Ben Greear60f2d1d2011-01-09 23:11:52 -08001312 txq->axq_qnum = axq_qnum;
1313 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301314 txq->axq_link = NULL;
1315 INIT_LIST_HEAD(&txq->axq_q);
1316 INIT_LIST_HEAD(&txq->axq_acq);
1317 spin_lock_init(&txq->axq_lock);
1318 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001319 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001320 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001321 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001322
1323 txq->txq_headidx = txq->txq_tailidx = 0;
1324 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1325 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301326 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001327 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301328}
1329
Sujithe8324352009-01-16 21:38:42 +05301330int ath_txq_update(struct ath_softc *sc, int qnum,
1331 struct ath9k_tx_queue_info *qinfo)
1332{
Sujithcbe61d82009-02-09 13:27:12 +05301333 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301334 int error = 0;
1335 struct ath9k_tx_queue_info qi;
1336
1337 if (qnum == sc->beacon.beaconq) {
1338 /*
1339 * XXX: for beacon queue, we just save the parameter.
1340 * It will be picked up by ath_beaconq_config when
1341 * it's necessary.
1342 */
1343 sc->beacon.beacon_qi = *qinfo;
1344 return 0;
1345 }
1346
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001347 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301348
1349 ath9k_hw_get_txq_props(ah, qnum, &qi);
1350 qi.tqi_aifs = qinfo->tqi_aifs;
1351 qi.tqi_cwmin = qinfo->tqi_cwmin;
1352 qi.tqi_cwmax = qinfo->tqi_cwmax;
1353 qi.tqi_burstTime = qinfo->tqi_burstTime;
1354 qi.tqi_readyTime = qinfo->tqi_readyTime;
1355
1356 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001357 ath_err(ath9k_hw_common(sc->sc_ah),
1358 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301359 error = -EIO;
1360 } else {
1361 ath9k_hw_resettxqueue(ah, qnum);
1362 }
1363
1364 return error;
1365}
1366
1367int ath_cabq_update(struct ath_softc *sc)
1368{
1369 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001370 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301371 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301372
1373 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1374 /*
1375 * Ensure the readytime % is within the bounds.
1376 */
Sujith17d79042009-02-09 13:27:03 +05301377 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1378 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1379 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1380 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301381
Steve Brown9814f6b2011-02-07 17:10:39 -07001382 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301383 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301384 ath_txq_update(sc, qnum, &qi);
1385
1386 return 0;
1387}
1388
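/*
 * A-MPDU frames that are not rate control probes are the only ones
 * counted in axq_ampdu_depth.
 */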
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001389static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1390{
1391 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1392 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1393}
1394
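/*
 * Complete (or recycle) every buffer on the given descriptor list.
 * The axq lock is dropped around the completion handlers, as the
 * sparse annotations below indicate.
 */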
Felix Fietkaufce041b2011-05-19 12:20:25 +02001395static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1396 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301397 __releases(txq->axq_lock)
1398 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301399{
1400 struct ath_buf *bf, *lastbf;
1401 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001402 struct ath_tx_status ts;
1403
1404 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301405 INIT_LIST_HEAD(&bf_head);
1406
Felix Fietkaufce041b2011-05-19 12:20:25 +02001407 while (!list_empty(list)) {
1408 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301409
Felix Fietkaufce041b2011-05-19 12:20:25 +02001410 if (bf->bf_stale) {
1411 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301412
Felix Fietkaufce041b2011-05-19 12:20:25 +02001413 ath_tx_return_buffer(sc, bf);
1414 continue;
Sujithe8324352009-01-16 21:38:42 +05301415 }
1416
1417 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001418 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001419
Sujithe8324352009-01-16 21:38:42 +05301420 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001421 if (bf_is_ampdu_not_probing(bf))
1422 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301423
Felix Fietkaufce041b2011-05-19 12:20:25 +02001424 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301425 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001426 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1427 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301428 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001429 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001430 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001431 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001432}
1433
1434/*
1435 * Drain a given TX queue (could be Beacon or Data)
1436 *
1437 * This assumes output has been stopped and
1438 * we do not need to block ath_tx_tasklet.
1439 */
1440void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1441{
1442 spin_lock_bh(&txq->axq_lock);
1443 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1444 int idx = txq->txq_tailidx;
1445
1446 while (!list_empty(&txq->txq_fifo[idx])) {
1447 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1448 retry_tx);
1449
1450 INCR(idx, ATH_TXFIFO_DEPTH);
1451 }
1452 txq->txq_tailidx = idx;
1453 }
1454
1455 txq->axq_link = NULL;
1456 txq->axq_tx_inprogress = false;
1457 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001458
1459 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001460 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1461 ath_txq_drain_pending_buffers(sc, txq);
1462
1463 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301464}
1465
Felix Fietkau080e1a22010-12-05 20:17:53 +01001466bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301467{
Sujithcbe61d82009-02-09 13:27:12 +05301468 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001469 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301470 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001471 int i;
1472 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301473
1474 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001475 return true;
Sujith043a0402009-01-16 21:38:47 +05301476
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001477 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301478
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001479 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301480 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001481 if (!ATH_TXQ_SETUP(sc, i))
1482 continue;
1483
Felix Fietkau34d25812011-10-07 02:28:12 +02001484 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1485 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301486 }
1487
Felix Fietkau080e1a22010-12-05 20:17:53 +01001488 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001489 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301490
1491 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001492 if (!ATH_TXQ_SETUP(sc, i))
1493 continue;
1494
1495 /*
1496 * The caller will resume queues with ieee80211_wake_queues.
1497 * Mark the queue as not stopped to prevent ath_tx_complete
1498 * from waking the queue too early.
1499 */
1500 txq = &sc->tx.txq[i];
1501 txq->stopped = false;
1502 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301503 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001504
1505 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301506}
1507
Sujithe8324352009-01-16 21:38:42 +05301508void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1509{
1510 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1511 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1512}
1513
Ben Greear7755bad2011-01-18 17:30:00 -08001514/* For each axq_acq entry, for each tid, try to schedule packets
1515 * for transmit until ampdu_depth has reached min Q depth.
1516 */
Sujithe8324352009-01-16 21:38:42 +05301517void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1518{
Ben Greear7755bad2011-01-18 17:30:00 -08001519 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1520 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301521
Felix Fietkau236de512011-09-03 01:40:25 +02001522 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001523 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301524 return;
1525
1526 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001527 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301528
Ben Greear7755bad2011-01-18 17:30:00 -08001529 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1530 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1531 list_del(&ac->list);
1532 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301533
Ben Greear7755bad2011-01-18 17:30:00 -08001534 while (!list_empty(&ac->tid_q)) {
1535 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1536 list);
1537 list_del(&tid->list);
1538 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301539
Ben Greear7755bad2011-01-18 17:30:00 -08001540 if (tid->paused)
1541 continue;
Sujithe8324352009-01-16 21:38:42 +05301542
Ben Greear7755bad2011-01-18 17:30:00 -08001543 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301544
Ben Greear7755bad2011-01-18 17:30:00 -08001545 /*
1546 * add tid to round-robin queue if more frames
1547 * are pending for the tid
1548 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001549 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001550 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301551
Ben Greear7755bad2011-01-18 17:30:00 -08001552 if (tid == last_tid ||
1553 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1554 break;
Sujithe8324352009-01-16 21:38:42 +05301555 }
Ben Greear7755bad2011-01-18 17:30:00 -08001556
1557 if (!list_empty(&ac->tid_q)) {
1558 if (!ac->sched) {
1559 ac->sched = true;
1560 list_add_tail(&ac->list, &txq->axq_acq);
1561 }
1562 }
1563
1564 if (ac == last_ac ||
1565 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1566 return;
Sujithe8324352009-01-16 21:38:42 +05301567 }
1568}
1569
Sujithe8324352009-01-16 21:38:42 +05301570/***********/
1571/* TX, DMA */
1572/***********/
1573
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001574/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001575 * Insert a chain of ath_buf (descriptors) on a txq and
                         1576 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001577 */
Sujith102e0572008-10-29 10:15:16 +05301578static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001579 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580{
Sujithcbe61d82009-02-09 13:27:12 +05301581 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001582 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001583 struct ath_buf *bf, *bf_last;
1584 bool puttxbuf = false;
1585 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301586
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001587 /*
1588 * Insert the frame on the outbound list and
1589 * pass it on to the hardware.
1590 */
1591
1592 if (list_empty(head))
1593 return;
1594
Felix Fietkaufce041b2011-05-19 12:20:25 +02001595 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001596 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001597 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001598
Joe Perches226afe62010-12-02 19:12:37 -08001599 ath_dbg(common, ATH_DBG_QUEUE,
1600 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001601
Felix Fietkaufce041b2011-05-19 12:20:25 +02001602 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1603 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001604 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001605 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001606 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001607 list_splice_tail_init(head, &txq->axq_q);
1608
Felix Fietkaufce041b2011-05-19 12:20:25 +02001609 if (txq->axq_link) {
1610 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001611 ath_dbg(common, ATH_DBG_XMIT,
1612 "link[%u] (%p)=%llx (%p)\n",
1613 txq->axq_qnum, txq->axq_link,
1614 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001615 } else if (!edma)
1616 puttxbuf = true;
1617
1618 txq->axq_link = bf_last->bf_desc;
1619 }
1620
1621 if (puttxbuf) {
1622 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1623 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1624 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1625 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1626 }
1627
1628 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001629 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001630 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001631 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001632
1633 if (!internal) {
1634 txq->axq_depth++;
1635 if (bf_is_ampdu_not_probing(bf))
1636 txq->axq_ampdu_depth++;
1637 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001638}
1639
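/*
 * Queue an MPDU for aggregate transmission: either buffer it in the
 * TID's software queue, or, when the BAW and queue depth allow, add it
 * to the BAW and push it to the hardware right away.
 */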
Sujithe8324352009-01-16 21:38:42 +05301640static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001641 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301642{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001643 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001644 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001645 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301646
1647 /*
1648 * Do not queue to h/w when any of the following conditions is true:
1649 * - there are pending frames in software queue
1650 * - the TID is currently paused for ADDBA/BAR request
1651 * - seqno is not within block-ack window
1652 * - h/w queue depth exceeds low water mark
1653 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001654 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001655 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001656 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001657 /*
Sujithe8324352009-01-16 21:38:42 +05301658 * Add this frame to software queue for scheduling later
1659 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001660 */
Ben Greearbda8add2011-01-09 23:11:48 -08001661 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001662 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001663 if (!txctl->an || !txctl->an->sleeping)
1664 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301665 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001666 }
1667
Felix Fietkau44f1d262011-08-28 00:32:25 +02001668 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1669 if (!bf)
1670 return;
1671
Felix Fietkau399c6482011-09-14 21:24:17 +02001672 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001673 INIT_LIST_HEAD(&bf_head);
1674 list_add(&bf->list, &bf_head);
1675
Sujithe8324352009-01-16 21:38:42 +05301676 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001677 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301678
1679 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001680 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301681 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001682 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001683 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301684}
1685
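/*
 * Send a frame to the hardware without aggregation.  When a TID is
 * given, the starting sequence number for a later ADDBA request is
 * advanced as well.
 */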
Felix Fietkau82b873a2010-11-11 03:18:37 +01001686static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001687 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001688{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001689 struct ath_frame_info *fi = get_frame_info(skb);
1690 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301691 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001692
Felix Fietkau44f1d262011-08-28 00:32:25 +02001693 bf = fi->bf;
1694 if (!bf)
1695 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1696
1697 if (!bf)
1698 return;
1699
1700 INIT_LIST_HEAD(&bf_head);
1701 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001702 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301703
1704 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001705 if (tid)
1706 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301707
Sujithd43f30152009-01-16 21:38:53 +05301708 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001709 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001710 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301711 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001712}
1713
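/*
 * Cache per-frame transmit state (key index, key type, frame length)
 * in the skb control block so it is still available at completion time.
 */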
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001714static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1715 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301716{
1717 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001718 struct ieee80211_sta *sta = tx_info->control.sta;
1719 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001720 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001721 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001722 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001723 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301724
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001725 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301726
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001727 if (sta)
1728 an = (struct ath_node *) sta->drv_priv;
1729
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001730 memset(fi, 0, sizeof(*fi));
1731 if (hw_key)
1732 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001733 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1734 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001735 else
1736 fi->keyix = ATH9K_TXKEYIX_INVALID;
1737 fi->keytype = keytype;
1738 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301739}
1740
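/*
 * On hardware with the APM capability, reduce a 3-chain mask to two
 * chains for legacy and low-MCS rates on 5 GHz channels (presumably to
 * stay within transmit power limits).
 */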
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301741u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1742{
1743 struct ath_hw *ah = sc->sc_ah;
1744 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301745 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1746 (curchan->channelFlags & CHANNEL_5GHZ) &&
1747 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301748 return 0x3;
1749 else
1750 return chainmask;
1751}
1752
Felix Fietkau44f1d262011-08-28 00:32:25 +02001753/*
                         1754 * Assign a descriptor (and a sequence number if necessary)
                         1755 * and map the buffer for DMA. Frees the skb on error.
1756 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001757static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001758 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001759 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001760 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301761{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001762 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001763 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001764 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001765 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001766 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001767
1768 bf = ath_tx_get_buffer(sc);
1769 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001770 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001771 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001772 }
Sujithe8324352009-01-16 21:38:42 +05301773
Sujithe8324352009-01-16 21:38:42 +05301774 ATH_TXBUF_RESET(bf);
1775
Felix Fietkaufa05f872011-08-28 00:32:24 +02001776 if (tid) {
1777 seqno = tid->seq_next;
1778 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1779 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1780 bf->bf_state.seqno = seqno;
1781 }
1782
Sujithe8324352009-01-16 21:38:42 +05301783 bf->bf_mpdu = skb;
1784
Ben Greearc1739eb32010-10-14 12:45:29 -07001785 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1786 skb->len, DMA_TO_DEVICE);
1787 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301788 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001789 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001790 ath_err(ath9k_hw_common(sc->sc_ah),
1791 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001792 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001793 goto error;
Sujithe8324352009-01-16 21:38:42 +05301794 }
1795
Felix Fietkau56dc6332011-08-28 00:32:22 +02001796 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001797
1798 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001799
1800error:
1801 dev_kfree_skb_any(skb);
1802 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001803}
1804
1805/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001806static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001807 struct ath_tx_control *txctl)
1808{
Felix Fietkau04caf862010-11-14 15:20:12 +01001809 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1810 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001811 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001812 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001813 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301814
Sujithe8324352009-01-16 21:38:42 +05301815 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301816 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1817 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001818 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1819 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001820 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001821
Felix Fietkau066dae92010-11-07 14:59:39 +01001822 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001823 }
1824
1825 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001826 /*
1827 * Try aggregation if it's a unicast data frame
1828 * and the destination is HT capable.
1829 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001830 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301831 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001832 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1833 if (!bf)
1834 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001835
Felix Fietkau82b873a2010-11-11 03:18:37 +01001836 bf->bf_state.bfs_paprd = txctl->paprd;
1837
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301838 if (txctl->paprd)
1839 bf->bf_state.bfs_paprd_timestamp = jiffies;
1840
Felix Fietkau44f1d262011-08-28 00:32:25 +02001841 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301842 }
1843
Felix Fietkaufa05f872011-08-28 00:32:24 +02001844out:
Sujithe8324352009-01-16 21:38:42 +05301845 spin_unlock_bh(&txctl->txq->axq_lock);
1846}
1847
1848/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001849int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301850 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001851{
Felix Fietkau28d16702010-11-14 15:20:10 +01001852 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1853 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001854 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001855 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001856 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001857 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001858 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001859 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001860 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001861
Ben Greeara9927ba2010-12-06 21:13:49 -08001862 /* NOTE: sta can be NULL according to net/mac80211.h */
1863 if (sta)
1864 txctl->an = (struct ath_node *)sta->drv_priv;
1865
Felix Fietkau04caf862010-11-14 15:20:12 +01001866 if (info->control.hw_key)
1867 frmlen += info->control.hw_key->icv_len;
1868
Felix Fietkau28d16702010-11-14 15:20:10 +01001869 /*
1870 * As a temporary workaround, assign seq# here; this will likely need
1871 * to be cleaned up to work better with Beacon transmission and virtual
1872 * BSSes.
1873 */
1874 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1875 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1876 sc->tx.seq_no += 0x10;
1877 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1878 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1879 }
1880
John W. Linville42cecc32011-09-19 15:42:31 -04001881 /* Add the padding after the header if this is not already done */
1882 padpos = ath9k_cmn_padpos(hdr->frame_control);
1883 padsize = padpos & 3;
1884 if (padsize && skb->len > padpos) {
1885 if (skb_headroom(skb) < padsize)
1886 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001887
John W. Linville42cecc32011-09-19 15:42:31 -04001888 skb_push(skb, padsize);
1889 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001890 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001891 }
1892
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001893 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1894 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1895 !ieee80211_is_data(hdr->frame_control))
1896 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1897
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001898 setup_frame_info(hw, skb, frmlen);
1899
1900 /*
1901 * At this point, the vif, hw_key and sta pointers in the tx control
                         1902 * info are no longer valid (overwritten by the ath_frame_info data).
1903 */
1904
Felix Fietkau066dae92010-11-07 14:59:39 +01001905 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001906 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001907 if (txq == sc->tx.txq_map[q] &&
1908 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001909 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001910 txq->stopped = 1;
1911 }
1912 spin_unlock_bh(&txq->axq_lock);
1913
Felix Fietkau44f1d262011-08-28 00:32:25 +02001914 ath_tx_start_dma(sc, skb, txctl);
1915 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001916}
1917
Sujithe8324352009-01-16 21:38:42 +05301918/*****************/
1919/* TX Completion */
1920/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001921
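/*
 * Hand a completed frame back to mac80211: strip the header padding
 * added on transmit, update powersave state and wake the mac80211
 * queue if it had been stopped for lack of room.
 */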
Sujithe8324352009-01-16 21:38:42 +05301922static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301923 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001924{
Sujithe8324352009-01-16 21:38:42 +05301925 struct ieee80211_hw *hw = sc->hw;
1926 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001927 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001928 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001929 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301930
Joe Perches226afe62010-12-02 19:12:37 -08001931 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301932
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301933 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301934 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301935
Felix Fietkau55797b12011-09-14 21:24:16 +02001936 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301937 /* Frame was ACKed */
1938 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301939
John W. Linville42cecc32011-09-19 15:42:31 -04001940 padpos = ath9k_cmn_padpos(hdr->frame_control);
1941 padsize = padpos & 3;
                         1942 if (padsize && skb->len > padpos + padsize) {
1943 /*
1944 * Remove MAC header padding before giving the frame back to
1945 * mac80211.
1946 */
1947 memmove(skb->data + padsize, skb->data, padpos);
1948 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301949 }
1950
Sujith1b04b932010-01-08 10:36:05 +05301951 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1952 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001953 ath_dbg(common, ATH_DBG_PS,
1954 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301955 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1956 PS_WAIT_FOR_CAB |
1957 PS_WAIT_FOR_PSPOLL_DATA |
1958 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001959 }
1960
Felix Fietkau7545daf2011-01-24 19:23:16 +01001961 q = skb_get_queue_mapping(skb);
1962 if (txq == sc->tx.txq_map[q]) {
1963 spin_lock_bh(&txq->axq_lock);
1964 if (WARN_ON(--txq->pending_frames < 0))
1965 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001966
Felix Fietkau7545daf2011-01-24 19:23:16 +01001967 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1968 ieee80211_wake_queue(sc->hw, q);
1969 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001970 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001971 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001972 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001973
1974 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301975}
1976
1977static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001978 struct ath_txq *txq, struct list_head *bf_q,
1979 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301980{
1981 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001982 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05301983 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301984 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301985
Sujithe8324352009-01-16 21:38:42 +05301986 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301987 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301988
Felix Fietkau55797b12011-09-14 21:24:16 +02001989 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301990 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301991
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001992 if (ts->ts_status & ATH9K_TXERR_FILT)
1993 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1994
Ben Greearc1739eb32010-10-14 12:45:29 -07001995 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001996 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001997
1998 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301999 if (time_after(jiffies,
2000 bf->bf_state.bfs_paprd_timestamp +
2001 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002002 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002003 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002004 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002005 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002006 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302007 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002008 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002009 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2010 * accidentally reference it later.
2011 */
2012 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302013
2014 /*
                         2015 * Return the list of ath_bufs of this mpdu to the free queue.
2016 */
2017 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2018 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2019 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2020}
2021
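/*
 * Translate the hardware tx status into mac80211 rate control
 * feedback: ACK RSSI, A-MPDU subframe counts and, for underruns at the
 * maximum trigger level, an inflated retry count for the failed rate.
 */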
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002022static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2023 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002024 int txok)
Sujithc4288392008-11-18 09:09:30 +05302025{
Sujitha22be222009-03-30 15:28:36 +05302026 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302027 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302028 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002029 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002030 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302031 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302032
Sujith95e4acb2009-03-13 08:56:09 +05302033 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002034 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302035
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002036 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302037 WARN_ON(tx_rateindex >= hw->max_rates);
2038
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002039 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002040 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302041
Felix Fietkaub572d032010-11-14 15:20:07 +01002042 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002043 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302044 tx_info->status.ampdu_len = nframes;
2045 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002046
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002047 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002048 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002049 /*
                         2050 * If an underrun error is seen, treat it as an excessive
                         2051 * retry only if the max frame trigger level has been reached
2052 * (2 KB for single stream, and 4 KB for dual stream).
2053 * Adjust the long retry as if the frame was tried
2054 * hw->max_rate_tries times to affect how rate control updates
2055 * PER for the failed rate.
                         2056 * In case of congestion on the bus, penalizing this type of
                         2057 * underrun should help the hardware actually transmit new frames
2058 * successfully by eventually preferring slower rates.
2059 * This itself should also alleviate congestion on the bus.
2060 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002061 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2062 ATH9K_TX_DELIM_UNDERRUN)) &&
2063 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002064 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002065 tx_info->status.rates[tx_rateindex].count =
2066 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302067 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302068
Felix Fietkau545750d2009-11-23 22:21:01 +01002069 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302070 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002071 tx_info->status.rates[i].idx = -1;
2072 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302073
Felix Fietkau78c46532010-06-25 01:26:16 +02002074 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302075}
2076
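/*
 * Common completion path for legacy and EDMA tx: update the queue
 * depth counters, report the frame (or aggregate) with the axq lock
 * dropped, then reschedule the queue.
 */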
Felix Fietkaufce041b2011-05-19 12:20:25 +02002077static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2078 struct ath_tx_status *ts, struct ath_buf *bf,
2079 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302080 __releases(txq->axq_lock)
2081 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002082{
2083 int txok;
2084
2085 txq->axq_depth--;
2086 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2087 txq->axq_tx_inprogress = false;
2088 if (bf_is_ampdu_not_probing(bf))
2089 txq->axq_ampdu_depth--;
2090
2091 spin_unlock_bh(&txq->axq_lock);
2092
2093 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002094 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002095 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2096 } else
2097 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2098
2099 spin_lock_bh(&txq->axq_lock);
2100
2101 if (sc->sc_flags & SC_OP_TXAGGR)
2102 ath_txq_schedule(sc, txq);
2103}
2104
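/*
 * Reap completed frames from a legacy (non-EDMA) hardware queue.  The
 * last DONE descriptor is kept on the queue as the holding descriptor,
 * see the race note below.
 */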
Sujithc4288392008-11-18 09:09:30 +05302105static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002106{
Sujithcbe61d82009-02-09 13:27:12 +05302107 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002108 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2110 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302111 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002112 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002113 int status;
2114
Joe Perches226afe62010-12-02 19:12:37 -08002115 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2116 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2117 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002118
Felix Fietkaufce041b2011-05-19 12:20:25 +02002119 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002120 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002121 if (work_pending(&sc->hw_reset_work))
2122 break;
2123
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 if (list_empty(&txq->axq_q)) {
2125 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002126 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002127 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128 break;
2129 }
2130 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2131
2132 /*
2133 * There is a race condition that a BH gets scheduled
                         2134 * after sw writes TxE and before hw re-loads the last
2135 * descriptor to get the newly chained one.
2136 * Software must keep the last DONE descriptor as a
2137 * holding descriptor - software does so by marking
2138 * it with the STALE flag.
2139 */
2140 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302141 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002142 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002143 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002144 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002145
2146 bf = list_entry(bf_held->list.next, struct ath_buf,
2147 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148 }
2149
2150 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302151 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002152
Felix Fietkau29bffa92010-03-29 20:14:23 -07002153 memset(&ts, 0, sizeof(ts));
2154 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002155 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002156 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002157
Ben Greear2dac4fb2011-01-09 23:11:45 -08002158 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159
2160 /*
                         2161 * Remove the ath_bufs of the same transmit unit from the txq,
                         2162 * but leave the last descriptor behind as the holding
2163 * descriptor for hw.
2164 */
Sujitha119cc42009-03-30 15:28:38 +05302165 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002167 if (!list_is_singular(&lastbf->list))
2168 list_cut_position(&bf_head,
2169 &txq->axq_q, lastbf->list.prev);
2170
Felix Fietkaufce041b2011-05-19 12:20:25 +02002171 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002172 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002173 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174 }
Johannes Berge6a98542008-10-21 12:40:02 +02002175
Felix Fietkaufce041b2011-05-19 12:20:25 +02002176 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002178 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002179}
2180
Sujith305fe472009-07-23 15:32:29 +05302181static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002182{
2183 struct ath_softc *sc = container_of(work, struct ath_softc,
2184 tx_complete_work.work);
2185 struct ath_txq *txq;
2186 int i;
2187 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002188#ifdef CONFIG_ATH9K_DEBUGFS
2189 sc->tx_complete_poll_work_seen++;
2190#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002191
2192 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2193 if (ATH_TXQ_SETUP(sc, i)) {
2194 txq = &sc->tx.txq[i];
2195 spin_lock_bh(&txq->axq_lock);
2196 if (txq->axq_depth) {
2197 if (txq->axq_tx_inprogress) {
2198 needreset = true;
2199 spin_unlock_bh(&txq->axq_lock);
2200 break;
2201 } else {
2202 txq->axq_tx_inprogress = true;
2203 }
2204 }
2205 spin_unlock_bh(&txq->axq_lock);
2206 }
2207
2208 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002209 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2210 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002211 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002212 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002213 }
2214
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002215 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002216 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2217}
2218
2219
Sujithe8324352009-01-16 21:38:42 +05302220
2221void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002222{
Sujithe8324352009-01-16 21:38:42 +05302223 int i;
2224 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002225
Sujithe8324352009-01-16 21:38:42 +05302226 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002227
2228 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302229 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2230 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002231 }
2232}
2233
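/*
 * Completion handler for EDMA hardware, which reports tx status
 * through a separate status ring (queried with a NULL descriptor)
 * instead of in the frame descriptors themselves.
 */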
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002234void ath_tx_edma_tasklet(struct ath_softc *sc)
2235{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002236 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002237 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2238 struct ath_hw *ah = sc->sc_ah;
2239 struct ath_txq *txq;
2240 struct ath_buf *bf, *lastbf;
2241 struct list_head bf_head;
2242 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002243
2244 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002245 if (work_pending(&sc->hw_reset_work))
2246 break;
2247
Felix Fietkaufce041b2011-05-19 12:20:25 +02002248 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002249 if (status == -EINPROGRESS)
2250 break;
2251 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002252 ath_dbg(common, ATH_DBG_XMIT,
2253 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002254 break;
2255 }
2256
2257 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002258 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002259 continue;
2260
Felix Fietkaufce041b2011-05-19 12:20:25 +02002261 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002262
2263 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002264
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002265 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2266 spin_unlock_bh(&txq->axq_lock);
2267 return;
2268 }
2269
2270 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2271 struct ath_buf, list);
2272 lastbf = bf->bf_lastbf;
2273
2274 INIT_LIST_HEAD(&bf_head);
2275 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2276 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002277
Felix Fietkaufce041b2011-05-19 12:20:25 +02002278 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2279 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002280
Felix Fietkaufce041b2011-05-19 12:20:25 +02002281 if (!list_empty(&txq->axq_q)) {
2282 struct list_head bf_q;
2283
2284 INIT_LIST_HEAD(&bf_q);
2285 txq->axq_link = NULL;
2286 list_splice_tail_init(&txq->axq_q, &bf_q);
2287 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2288 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002289 }
2290
Felix Fietkaufce041b2011-05-19 12:20:25 +02002291 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002292 spin_unlock_bh(&txq->axq_lock);
2293 }
2294}
2295
Sujithe8324352009-01-16 21:38:42 +05302296/*****************/
2297/* Init, Cleanup */
2298/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002299
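/*
 * Allocate the DMA-coherent ring the EDMA hardware posts tx status
 * entries to; sized by the per-entry status length from the hw caps.
 */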
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002300static int ath_txstatus_setup(struct ath_softc *sc, int size)
2301{
2302 struct ath_descdma *dd = &sc->txsdma;
2303 u8 txs_len = sc->sc_ah->caps.txs_len;
2304
2305 dd->dd_desc_len = size * txs_len;
2306 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2307 &dd->dd_desc_paddr, GFP_KERNEL);
2308 if (!dd->dd_desc)
2309 return -ENOMEM;
2310
2311 return 0;
2312}
2313
2314static int ath_tx_edma_init(struct ath_softc *sc)
2315{
2316 int err;
2317
2318 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2319 if (!err)
2320 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2321 sc->txsdma.dd_desc_paddr,
2322 ATH_TXSTATUS_RING_SIZE);
2323
2324 return err;
2325}
2326
2327static void ath_tx_edma_cleanup(struct ath_softc *sc)
2328{
2329 struct ath_descdma *dd = &sc->txsdma;
2330
2331 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2332 dd->dd_desc_paddr);
2333}
2334
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002335int ath_tx_init(struct ath_softc *sc, int nbufs)
2336{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002337 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002338 int error = 0;
2339
Sujith797fe5cb2009-03-30 15:28:45 +05302340 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002341
Sujith797fe5cb2009-03-30 15:28:45 +05302342 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002343 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302344 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002345 ath_err(common,
2346 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302347 goto err;
2348 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002349
Sujith797fe5cb2009-03-30 15:28:45 +05302350 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002351 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302352 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002353 ath_err(common,
2354 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302355 goto err;
2356 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002357
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002358 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2359
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002360 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2361 error = ath_tx_edma_init(sc);
2362 if (error)
2363 goto err;
2364 }
2365
Sujith797fe5cb2009-03-30 15:28:45 +05302366err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002367 if (error != 0)
2368 ath_tx_cleanup(sc);
2369
2370 return error;
2371}
2372
Sujith797fe5cb2009-03-30 15:28:45 +05302373void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002374{
Sujithb77f4832008-12-07 21:44:03 +05302375 if (sc->beacon.bdma.dd_desc_len != 0)
2376 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002377
Sujithb77f4832008-12-07 21:44:03 +05302378 if (sc->tx.txdma.dd_desc_len != 0)
2379 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002380
2381 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2382 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002383}
2384
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002385void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2386{
Sujithc5170162008-10-29 10:13:59 +05302387 struct ath_atx_tid *tid;
2388 struct ath_atx_ac *ac;
2389 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002390
Sujith8ee5afb2008-12-07 21:43:36 +05302391 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302392 tidno < WME_NUM_TID;
2393 tidno++, tid++) {
2394 tid->an = an;
2395 tid->tidno = tidno;
2396 tid->seq_start = tid->seq_next = 0;
2397 tid->baw_size = WME_MAX_BA;
2398 tid->baw_head = tid->baw_tail = 0;
2399 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302400 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302401 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002402 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302403 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302404 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302405 tid->state &= ~AGGR_ADDBA_COMPLETE;
2406 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302407 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002408
Sujith8ee5afb2008-12-07 21:43:36 +05302409 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302410 acno < WME_NUM_AC; acno++, ac++) {
2411 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002412 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302413 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002414 }
2415}
2416
Sujithb5aa9bf2008-10-29 10:13:31 +05302417void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002418{
Felix Fietkau2b409942010-07-07 19:42:08 +02002419 struct ath_atx_ac *ac;
2420 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002421 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002422 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302423
Felix Fietkau2b409942010-07-07 19:42:08 +02002424 for (tidno = 0, tid = &an->tid[tidno];
2425 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002426
Felix Fietkau2b409942010-07-07 19:42:08 +02002427 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002428 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002429
Felix Fietkau2b409942010-07-07 19:42:08 +02002430 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002431
Felix Fietkau2b409942010-07-07 19:42:08 +02002432 if (tid->sched) {
2433 list_del(&tid->list);
2434 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002435 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002436
2437 if (ac->sched) {
2438 list_del(&ac->list);
2439 tid->ac->sched = false;
2440 }
2441
2442 ath_tid_drain(sc, txq, tid);
2443 tid->state &= ~AGGR_ADDBA_COMPLETE;
2444 tid->state &= ~AGGR_CLEANUP;
2445
2446 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002447 }
2448}