/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

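/*
 * Queue a TID for transmission: add it to its access category's TID list
 * and, if needed, add the access category to the hardware queue's schedule
 * list. Paused or already scheduled TIDs are left untouched.
 */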
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

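/*
 * Unpause a TID and kick the queue scheduler if it still has frames
 * queued in software.
 */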
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

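/*
 * Drain the software queue of a TID: frames that were already retried are
 * removed from the block-ack window and completed as failed, while fresh
 * frames are transmitted as normal (non-aggregate) frames.
 */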
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

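/*
 * Mark a sequence number as completed in the block-ack window and slide
 * the window start past any contiguous completed slots.
 */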
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

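/*
 * Record a newly transmitted sequence number in the block-ack window,
 * advancing the window tail if necessary.
 */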
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

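/*
 * Mark a frame as software retried: on the first retry the IEEE 802.11
 * retry bit is set in the header and the change is synced back to the
 * DMA buffer.
 */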
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

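/*
 * Allocate a spare ath_buf and copy the descriptor and buffer state of an
 * existing one, so the original can remain in place as a holding
 * descriptor.
 */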
static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

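/*
 * Walk an aggregate and count the total number of subframes and how many
 * of them are missing from the block-ack bitmap.
 */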
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

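/*
 * Handle the tx status of a completed aggregate: acknowledged subframes
 * are removed from the block-ack window and completed, failed subframes
 * are either software retried (requeued at the head of the TID queue to
 * preserve ordering) or dropped once the retry limit is reached, and rate
 * control is updated at most once per aggregate.
 */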
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (txok || !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it is not a
		 * holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer on the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

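/*
 * Return true if any configured entry in the frame's rate series is a
 * legacy (non-MCS) rate.
 */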
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

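/*
 * Compute the aggregate length limit for this TID: the smallest frame
 * length among the rate series that still fits into a 4 ms transmit
 * duration, clamped by the hardware limit and the peer's maximum A-MPDU
 * size. Returns 0 when aggregation should be avoided (probe rate or a
 * legacy rate in the series).
 */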
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiters when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

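/*
 * Form a single aggregate: pull frames from the TID's software queue as
 * long as they fit within the block-ack window, the rate-based length
 * limit and the subframe limit, insert the required delimiters, and chain
 * the buffers together on bf_q.
 */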
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over the block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed the aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed the subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for the previous frame to the aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

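/*
 * Fill the per-rate fields of the descriptor info (tries, rate code,
 * RTS/CTS protection, preamble, chainmask and packet duration) from the
 * mac80211 rate series attached to the frame.
 */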
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
				phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

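/*
 * Program the hardware descriptors for a frame or aggregate chain: common
 * flags and the rate series are set up once, then every buffer in the
 * chain gets its address, length, key and aggregation position written
 * out through ath9k_hw_set_txdesc().
 */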
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

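/*
 * Form and queue aggregates for a TID until the hardware queue is deep
 * enough or the block-ack window closes. A single leftover frame is sent
 * as a non-aggregated A-MPDU buffer.
 */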
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

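/*
 * Start an ADDBA session: pause the TID, reset its block-ack window state
 * and report the starting sequence number back to mac80211.
 */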
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

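/*
 * Called when a station enters powersave: unschedule all of its TIDs and
 * report to mac80211 which TIDs still have buffered frames.
 */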
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		buffered = !skb_queue_empty(&tid->buf_q);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

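/*
 * Create a hardware transmit queue for the given queue type and WMM access
 * class and initialize the matching software queue state.
 */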
1256struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1257{
Sujithcbe61d82009-02-09 13:27:12 +05301258 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301259 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001260 static const int subtype_txq_to_hwq[] = {
1261 [WME_AC_BE] = ATH_TXQ_AC_BE,
1262 [WME_AC_BK] = ATH_TXQ_AC_BK,
1263 [WME_AC_VI] = ATH_TXQ_AC_VI,
1264 [WME_AC_VO] = ATH_TXQ_AC_VO,
1265 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001266 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301267
1268 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001269 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301270 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1271 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1272 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1273 qi.tqi_physCompBuf = 0;
1274
1275 /*
1276 * Enable interrupts only for EOL and DESC conditions.
1277 * We mark tx descriptors to receive a DESC interrupt
 1278	 * when a tx queue gets deep; otherwise we wait for the
 1279	 * EOL to reap descriptors. Note that this is done to
 1280	 * reduce interrupt load, and this only defers reaping
 1281	 * descriptors, never transmitting frames. Aside from
 1282	 * reducing interrupts this also permits more concurrency.
 1283	 * The only potential downside is if the tx queue backs
 1284	 * up, in which case the top half of the kernel may back up
 1285	 * due to a lack of tx descriptors.
1286 *
1287 * The UAPSD queue is an exception, since we take a desc-
1288 * based intr on the EOSP frames.
1289 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001290 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1291 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1292 TXQ_FLAG_TXERRINT_ENABLE;
1293 } else {
1294 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1295 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1296 else
1297 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1298 TXQ_FLAG_TXDESCINT_ENABLE;
1299 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001300 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1301 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301302 /*
1303 * NB: don't print a message, this happens
1304 * normally on parts with too few tx queues
1305 */
1306 return NULL;
1307 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001308 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1309 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301310
Ben Greear60f2d1d2011-01-09 23:11:52 -08001311 txq->axq_qnum = axq_qnum;
1312 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301313 txq->axq_link = NULL;
1314 INIT_LIST_HEAD(&txq->axq_q);
1315 INIT_LIST_HEAD(&txq->axq_acq);
1316 spin_lock_init(&txq->axq_lock);
1317 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001318 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001319 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001320 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001321
1322 txq->txq_headidx = txq->txq_tailidx = 0;
1323 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1324 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301325 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001326 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301327}
1328
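/*
 * Apply updated WME parameters (AIFS, CW min/max, burst and ready time) to
 * a hardware queue. Beacon queue parameters are only cached here and are
 * applied later by ath_beaconq_config. A typical caller (sketch, assumed
 * here) fills a struct ath9k_tx_queue_info from mac80211's conf_tx
 * parameters and passes it in as qinfo.
 */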
Sujithe8324352009-01-16 21:38:42 +05301329int ath_txq_update(struct ath_softc *sc, int qnum,
1330 struct ath9k_tx_queue_info *qinfo)
1331{
Sujithcbe61d82009-02-09 13:27:12 +05301332 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301333 int error = 0;
1334 struct ath9k_tx_queue_info qi;
1335
1336 if (qnum == sc->beacon.beaconq) {
1337 /*
1338 * XXX: for beacon queue, we just save the parameter.
1339 * It will be picked up by ath_beaconq_config when
1340 * it's necessary.
1341 */
1342 sc->beacon.beacon_qi = *qinfo;
1343 return 0;
1344 }
1345
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001346 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301347
1348 ath9k_hw_get_txq_props(ah, qnum, &qi);
1349 qi.tqi_aifs = qinfo->tqi_aifs;
1350 qi.tqi_cwmin = qinfo->tqi_cwmin;
1351 qi.tqi_cwmax = qinfo->tqi_cwmax;
1352 qi.tqi_burstTime = qinfo->tqi_burstTime;
1353 qi.tqi_readyTime = qinfo->tqi_readyTime;
1354
1355 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001356 ath_err(ath9k_hw_common(sc->sc_ah),
1357 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301358 error = -EIO;
1359 } else {
1360 ath9k_hw_resettxqueue(ah, qnum);
1361 }
1362
1363 return error;
1364}
1365
1366int ath_cabq_update(struct ath_softc *sc)
1367{
1368 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001369 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301370 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301371
1372 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1373 /*
1374 * Ensure the readytime % is within the bounds.
1375 */
Sujith17d79042009-02-09 13:27:03 +05301376 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1377 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1378 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1379 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301380
Steve Brown9814f6b2011-02-07 17:10:39 -07001381 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301382 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301383 ath_txq_update(sc, qnum, &qi);
1384
1385 return 0;
1386}
1387
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001388static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1389{
1390 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1391 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1392}
1393
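/*
 * Complete or discard every frame on the given descriptor list. The txq
 * lock is dropped around the completion calls, hence the sparse
 * __releases/__acquires annotations below.
 */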
Felix Fietkaufce041b2011-05-19 12:20:25 +02001394static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1395 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301396 __releases(txq->axq_lock)
1397 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301398{
1399 struct ath_buf *bf, *lastbf;
1400 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001401 struct ath_tx_status ts;
1402
1403 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301404 INIT_LIST_HEAD(&bf_head);
1405
Felix Fietkaufce041b2011-05-19 12:20:25 +02001406 while (!list_empty(list)) {
1407 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301408
Felix Fietkaufce041b2011-05-19 12:20:25 +02001409 if (bf->bf_stale) {
1410 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301411
Felix Fietkaufce041b2011-05-19 12:20:25 +02001412 ath_tx_return_buffer(sc, bf);
1413 continue;
Sujithe8324352009-01-16 21:38:42 +05301414 }
1415
1416 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001417 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001418
Sujithe8324352009-01-16 21:38:42 +05301419 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001420 if (bf_is_ampdu_not_probing(bf))
1421 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301422
Felix Fietkaufce041b2011-05-19 12:20:25 +02001423 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301424 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001425 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1426 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301427 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001428 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001429 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001430 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001431}
1432
1433/*
1434 * Drain a given TX queue (could be Beacon or Data)
1435 *
1436 * This assumes output has been stopped and
1437 * we do not need to block ath_tx_tasklet.
1438 */
1439void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1440{
1441 spin_lock_bh(&txq->axq_lock);
1442 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1443 int idx = txq->txq_tailidx;
1444
1445 while (!list_empty(&txq->txq_fifo[idx])) {
1446 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1447 retry_tx);
1448
1449 INCR(idx, ATH_TXFIFO_DEPTH);
1450 }
1451 txq->txq_tailidx = idx;
1452 }
1453
1454 txq->axq_link = NULL;
1455 txq->axq_tx_inprogress = false;
1456 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001457
1458 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001459 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1460 ath_txq_drain_pending_buffers(sc, txq);
1461
1462 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301463}
1464
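/*
 * Abort tx DMA and drain every initialized hardware queue. Returns true
 * only if no queue still had pending descriptors after the abort.
 */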
Felix Fietkau080e1a22010-12-05 20:17:53 +01001465bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301466{
Sujithcbe61d82009-02-09 13:27:12 +05301467 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001468 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301469 struct ath_txq *txq;
Felix Fietkau34d25812011-10-07 02:28:12 +02001470 int i;
1471 u32 npend = 0;
Sujith043a0402009-01-16 21:38:47 +05301472
1473 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001474 return true;
Sujith043a0402009-01-16 21:38:47 +05301475
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001476 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301477
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001478 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301479 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001480 if (!ATH_TXQ_SETUP(sc, i))
1481 continue;
1482
Felix Fietkau34d25812011-10-07 02:28:12 +02001483 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1484 npend |= BIT(i);
Sujith043a0402009-01-16 21:38:47 +05301485 }
1486
Felix Fietkau080e1a22010-12-05 20:17:53 +01001487 if (npend)
Felix Fietkau34d25812011-10-07 02:28:12 +02001488 ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
Sujith043a0402009-01-16 21:38:47 +05301489
1490 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001491 if (!ATH_TXQ_SETUP(sc, i))
1492 continue;
1493
1494 /*
1495 * The caller will resume queues with ieee80211_wake_queues.
1496 * Mark the queue as not stopped to prevent ath_tx_complete
1497 * from waking the queue too early.
1498 */
1499 txq = &sc->tx.txq[i];
1500 txq->stopped = false;
1501 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301502 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001503
1504 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301505}
1506
Sujithe8324352009-01-16 21:38:42 +05301507void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1508{
1509 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1510 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1511}
1512
Ben Greear7755bad2011-01-18 17:30:00 -08001513/* For each axq_acq entry, for each tid, try to schedule packets
 1514	 * for transmission until axq_ampdu_depth reaches the minimum queue depth.
1515 */
Sujithe8324352009-01-16 21:38:42 +05301516void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1517{
Ben Greear7755bad2011-01-18 17:30:00 -08001518 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1519 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301520
Felix Fietkau236de512011-09-03 01:40:25 +02001521 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001522 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301523 return;
1524
1525 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001526 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301527
Ben Greear7755bad2011-01-18 17:30:00 -08001528 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1529 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1530 list_del(&ac->list);
1531 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301532
Ben Greear7755bad2011-01-18 17:30:00 -08001533 while (!list_empty(&ac->tid_q)) {
1534 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1535 list);
1536 list_del(&tid->list);
1537 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301538
Ben Greear7755bad2011-01-18 17:30:00 -08001539 if (tid->paused)
1540 continue;
Sujithe8324352009-01-16 21:38:42 +05301541
Ben Greear7755bad2011-01-18 17:30:00 -08001542 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301543
Ben Greear7755bad2011-01-18 17:30:00 -08001544 /*
1545 * add tid to round-robin queue if more frames
1546 * are pending for the tid
1547 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001548 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001549 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301550
Ben Greear7755bad2011-01-18 17:30:00 -08001551 if (tid == last_tid ||
1552 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1553 break;
Sujithe8324352009-01-16 21:38:42 +05301554 }
Ben Greear7755bad2011-01-18 17:30:00 -08001555
1556 if (!list_empty(&ac->tid_q)) {
1557 if (!ac->sched) {
1558 ac->sched = true;
1559 list_add_tail(&ac->list, &txq->axq_acq);
1560 }
1561 }
1562
1563 if (ac == last_ac ||
1564 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1565 return;
Sujithe8324352009-01-16 21:38:42 +05301566 }
1567}
1568
Sujithe8324352009-01-16 21:38:42 +05301569/***********/
1570/* TX, DMA */
1571/***********/
1572
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001573/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001574 * Insert a chain of ath_buf (descriptors) on a txq and
 1575	 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001576 */
Sujith102e0572008-10-29 10:15:16 +05301577static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001578 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001579{
Sujithcbe61d82009-02-09 13:27:12 +05301580 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001581 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001582 struct ath_buf *bf, *bf_last;
1583 bool puttxbuf = false;
1584 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301585
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001586 /*
1587 * Insert the frame on the outbound list and
1588 * pass it on to the hardware.
1589 */
1590
1591 if (list_empty(head))
1592 return;
1593
Felix Fietkaufce041b2011-05-19 12:20:25 +02001594 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001595 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001596 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001597
Joe Perches226afe62010-12-02 19:12:37 -08001598 ath_dbg(common, ATH_DBG_QUEUE,
1599 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001600
Felix Fietkaufce041b2011-05-19 12:20:25 +02001601 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1602 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001603 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001604 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001605 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001606 list_splice_tail_init(head, &txq->axq_q);
1607
Felix Fietkaufce041b2011-05-19 12:20:25 +02001608 if (txq->axq_link) {
1609 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001610 ath_dbg(common, ATH_DBG_XMIT,
1611 "link[%u] (%p)=%llx (%p)\n",
1612 txq->axq_qnum, txq->axq_link,
1613 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001614 } else if (!edma)
1615 puttxbuf = true;
1616
1617 txq->axq_link = bf_last->bf_desc;
1618 }
1619
1620 if (puttxbuf) {
1621 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1622 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1623 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1624 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1625 }
1626
1627 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001628 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001629 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001630 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001631
1632 if (!internal) {
1633 txq->axq_depth++;
1634 if (bf_is_ampdu_not_probing(bf))
1635 txq->axq_ampdu_depth++;
1636 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001637}
1638
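/*
 * Queue an MPDU that belongs to an aggregation session: either park it in
 * the TID's software queue for later aggregation, or push it to the
 * hardware immediately and add it to the block-ack window.
 */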
Sujithe8324352009-01-16 21:38:42 +05301639static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001640 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301641{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001642 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001643 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001644 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301645
1646 /*
1647 * Do not queue to h/w when any of the following conditions is true:
1648 * - there are pending frames in software queue
1649 * - the TID is currently paused for ADDBA/BAR request
1650 * - seqno is not within block-ack window
1651 * - h/w queue depth exceeds low water mark
1652 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001653 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001654 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001655 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001656 /*
Sujithe8324352009-01-16 21:38:42 +05301657 * Add this frame to software queue for scheduling later
1658 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001659 */
Ben Greearbda8add2011-01-09 23:11:48 -08001660 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001661 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001662 if (!txctl->an || !txctl->an->sleeping)
1663 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301664 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001665 }
1666
Felix Fietkau44f1d262011-08-28 00:32:25 +02001667 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1668 if (!bf)
1669 return;
1670
Felix Fietkau399c6482011-09-14 21:24:17 +02001671 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001672 INIT_LIST_HEAD(&bf_head);
1673 list_add(&bf->list, &bf_head);
1674
Sujithe8324352009-01-16 21:38:42 +05301675 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001676 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301677
1678 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001679 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301680 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001681 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001682 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301683}
1684
Felix Fietkau82b873a2010-11-11 03:18:37 +01001685static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001686 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001687{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001688 struct ath_frame_info *fi = get_frame_info(skb);
1689 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301690 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001691
Felix Fietkau44f1d262011-08-28 00:32:25 +02001692 bf = fi->bf;
1693 if (!bf)
1694 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1695
1696 if (!bf)
1697 return;
1698
1699 INIT_LIST_HEAD(&bf_head);
1700 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001701 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301702
1703 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001704 if (tid)
1705 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301706
Sujithd43f30152009-01-16 21:38:53 +05301707 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001708 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001709 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301710 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001711}
1712
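/*
 * Stash per-frame data (key index, key type, frame length) in the
 * driver-private area of the skb, since the mac80211 control info is
 * overwritten further down the tx path.
 */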
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001713static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1714 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301715{
1716 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001717 struct ieee80211_sta *sta = tx_info->control.sta;
1718 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001719 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001720 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001721 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001722 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301723
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001724 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301725
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001726 if (sta)
1727 an = (struct ath_node *) sta->drv_priv;
1728
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001729 memset(fi, 0, sizeof(*fi));
1730 if (hw_key)
1731 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001732 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1733 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001734 else
1735 fi->keyix = ATH9K_TXKEYIX_INVALID;
1736 fi->keytype = keytype;
1737 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301738}
1739
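/*
 * On hardware with the APM capability, 5 GHz transmissions using all three
 * chains (0x7) at rate codes below 0x90 are reduced to two chains (0x3);
 * otherwise the requested chainmask is returned unchanged.
 */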
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301740u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1741{
1742 struct ath_hw *ah = sc->sc_ah;
1743 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301744 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1745 (curchan->channelFlags & CHANNEL_5GHZ) &&
1746 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301747 return 0x3;
1748 else
1749 return chainmask;
1750}
1751
Felix Fietkau44f1d262011-08-28 00:32:25 +02001752/*
 1753	 * Assign a descriptor (and a sequence number if necessary)
 1754	 * and map the buffer for DMA. Frees the skb on error.
1755 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001756static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001757 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001758 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001759 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301760{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001761 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001762 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001763 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001764 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001765 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001766
1767 bf = ath_tx_get_buffer(sc);
1768 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001769 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001770 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001771 }
Sujithe8324352009-01-16 21:38:42 +05301772
Sujithe8324352009-01-16 21:38:42 +05301773 ATH_TXBUF_RESET(bf);
1774
Felix Fietkaufa05f872011-08-28 00:32:24 +02001775 if (tid) {
1776 seqno = tid->seq_next;
1777 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1778 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1779 bf->bf_state.seqno = seqno;
1780 }
1781
Sujithe8324352009-01-16 21:38:42 +05301782 bf->bf_mpdu = skb;
1783
Ben Greearc1739eb32010-10-14 12:45:29 -07001784 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1785 skb->len, DMA_TO_DEVICE);
1786 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301787 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001788 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001789 ath_err(ath9k_hw_common(sc->sc_ah),
1790 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001791 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001792 goto error;
Sujithe8324352009-01-16 21:38:42 +05301793 }
1794
Felix Fietkau56dc6332011-08-28 00:32:22 +02001795 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001796
1797 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001798
1799error:
1800 dev_kfree_skb_any(skb);
1801 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001802}
1803
1804/* FIXME: tx power */
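/*
 * Final dispatch for a frame handed in by mac80211: resolve the TID for
 * QoS data to an aggregation-capable peer and route the frame either
 * through the A-MPDU path or out as a normal frame.
 */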
Felix Fietkau44f1d262011-08-28 00:32:25 +02001805static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001806 struct ath_tx_control *txctl)
1807{
Felix Fietkau04caf862010-11-14 15:20:12 +01001808 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1809 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001810 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001811 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001812 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301813
Sujithe8324352009-01-16 21:38:42 +05301814 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301815 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1816 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001817 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1818 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001819 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001820
Felix Fietkau066dae92010-11-07 14:59:39 +01001821 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001822 }
1823
1824 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001825 /*
1826 * Try aggregation if it's a unicast data frame
1827 * and the destination is HT capable.
1828 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001829 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301830 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001831 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1832 if (!bf)
1833 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001834
Felix Fietkau82b873a2010-11-11 03:18:37 +01001835 bf->bf_state.bfs_paprd = txctl->paprd;
1836
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301837 if (txctl->paprd)
1838 bf->bf_state.bfs_paprd_timestamp = jiffies;
1839
Felix Fietkau44f1d262011-08-28 00:32:25 +02001840 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301841 }
1842
Felix Fietkaufa05f872011-08-28 00:32:24 +02001843out:
Sujithe8324352009-01-16 21:38:42 +05301844 spin_unlock_bh(&txctl->txq->axq_lock);
1845}
1846
1847/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001848int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301849 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001850{
Felix Fietkau28d16702010-11-14 15:20:10 +01001851 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1852 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001853 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001854 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001855 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001856 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001857 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001858 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001859 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001860
Ben Greeara9927ba2010-12-06 21:13:49 -08001861 /* NOTE: sta can be NULL according to net/mac80211.h */
1862 if (sta)
1863 txctl->an = (struct ath_node *)sta->drv_priv;
1864
Felix Fietkau04caf862010-11-14 15:20:12 +01001865 if (info->control.hw_key)
1866 frmlen += info->control.hw_key->icv_len;
1867
Felix Fietkau28d16702010-11-14 15:20:10 +01001868 /*
1869 * As a temporary workaround, assign seq# here; this will likely need
1870 * to be cleaned up to work better with Beacon transmission and virtual
1871 * BSSes.
1872 */
1873 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1874 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1875 sc->tx.seq_no += 0x10;
1876 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1877 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1878 }
1879
John W. Linville42cecc32011-09-19 15:42:31 -04001880 /* Add the padding after the header if this is not already done */
1881 padpos = ath9k_cmn_padpos(hdr->frame_control);
1882 padsize = padpos & 3;
1883 if (padsize && skb->len > padpos) {
1884 if (skb_headroom(skb) < padsize)
1885 return -ENOMEM;
Felix Fietkau28d16702010-11-14 15:20:10 +01001886
John W. Linville42cecc32011-09-19 15:42:31 -04001887 skb_push(skb, padsize);
1888 memmove(skb->data, skb->data + padsize, padpos);
Felix Fietkau6e82bc4a2011-09-15 10:03:12 +02001889 hdr = (struct ieee80211_hdr *) skb->data;
Felix Fietkau28d16702010-11-14 15:20:10 +01001890 }
1891
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001892 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1893 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1894 !ieee80211_is_data(hdr->frame_control))
1895 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1896
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001897 setup_frame_info(hw, skb, frmlen);
1898
1899 /*
1900 * At this point, the vif, hw_key and sta pointers in the tx control
 1901	 * info are no longer valid (overwritten by the ath_frame_info data).
1902 */
1903
Felix Fietkau066dae92010-11-07 14:59:39 +01001904 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001905 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001906 if (txq == sc->tx.txq_map[q] &&
1907 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001908 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001909 txq->stopped = 1;
1910 }
1911 spin_unlock_bh(&txq->axq_lock);
1912
Felix Fietkau44f1d262011-08-28 00:32:25 +02001913 ath_tx_start_dma(sc, skb, txctl);
1914 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001915}
1916
Sujithe8324352009-01-16 21:38:42 +05301917/*****************/
1918/* TX Completion */
1919/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001920
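/*
 * Hand a completed frame back to mac80211: strip the header padding added
 * on transmit, update the powersave state if we were waiting for this tx
 * to complete, and wake the mac80211 queue if it was stopped.
 */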
Sujithe8324352009-01-16 21:38:42 +05301921static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301922 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001923{
Sujithe8324352009-01-16 21:38:42 +05301924 struct ieee80211_hw *hw = sc->hw;
1925 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001926 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001927 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001928 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301929
Joe Perches226afe62010-12-02 19:12:37 -08001930 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301931
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301932 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301933 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301934
Felix Fietkau55797b12011-09-14 21:24:16 +02001935 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301936 /* Frame was ACKed */
1937 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301938
John W. Linville42cecc32011-09-19 15:42:31 -04001939 padpos = ath9k_cmn_padpos(hdr->frame_control);
1940 padsize = padpos & 3;
 1941	if (padsize && skb->len > padpos + padsize) {
1942 /*
1943 * Remove MAC header padding before giving the frame back to
1944 * mac80211.
1945 */
1946 memmove(skb->data + padsize, skb->data, padpos);
1947 skb_pull(skb, padsize);
Sujithe8324352009-01-16 21:38:42 +05301948 }
1949
Sujith1b04b932010-01-08 10:36:05 +05301950 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1951 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001952 ath_dbg(common, ATH_DBG_PS,
1953 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301954 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1955 PS_WAIT_FOR_CAB |
1956 PS_WAIT_FOR_PSPOLL_DATA |
1957 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001958 }
1959
Felix Fietkau7545daf2011-01-24 19:23:16 +01001960 q = skb_get_queue_mapping(skb);
1961 if (txq == sc->tx.txq_map[q]) {
1962 spin_lock_bh(&txq->axq_lock);
1963 if (WARN_ON(--txq->pending_frames < 0))
1964 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001965
Felix Fietkau7545daf2011-01-24 19:23:16 +01001966 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1967 ieee80211_wake_queue(sc->hw, q);
1968 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001969 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001970 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001971 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001972
1973 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301974}
1975
1976static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001977 struct ath_txq *txq, struct list_head *bf_q,
1978 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301979{
1980 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001981 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05301982 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301983 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301984
Sujithe8324352009-01-16 21:38:42 +05301985 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301986 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301987
Felix Fietkau55797b12011-09-14 21:24:16 +02001988 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301989 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301990
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001991 if (ts->ts_status & ATH9K_TXERR_FILT)
1992 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1993
Ben Greearc1739eb32010-10-14 12:45:29 -07001994 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001995 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001996
1997 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301998 if (time_after(jiffies,
1999 bf->bf_state.bfs_paprd_timestamp +
2000 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002001 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002002 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002003 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002004 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002005 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302006 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002007 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002008 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2009 * accidentally reference it later.
2010 */
2011 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302012
2013 /*
 2014	 * Return the list of ath_buf of this mpdu to the free queue
2015 */
2016 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2017 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2018 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2019}
2020
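/*
 * Fill the rate-control related fields of the mac80211 tx info from the
 * hardware tx status: ACK RSSI, A-MPDU length/ack counts and per-rate try
 * counts, including the underrun workaround described below.
 */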
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002021static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2022 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002023 int txok)
Sujithc4288392008-11-18 09:09:30 +05302024{
Sujitha22be222009-03-30 15:28:36 +05302025 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302026 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302027 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002028 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002029 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302030 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302031
Sujith95e4acb2009-03-13 08:56:09 +05302032 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002033 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302034
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002035 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302036 WARN_ON(tx_rateindex >= hw->max_rates);
2037
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002038 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002039 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302040
Felix Fietkaub572d032010-11-14 15:20:07 +01002041 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002042 }
Rajkumar Manoharan185d1582011-09-26 21:48:39 +05302043 tx_info->status.ampdu_len = nframes;
2044 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002045
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002046 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002047 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002048 /*
 2049		 * If an underrun error is seen, treat it as an excessive
 2050		 * retry only if the max frame trigger level has been reached
2051 * (2 KB for single stream, and 4 KB for dual stream).
2052 * Adjust the long retry as if the frame was tried
2053 * hw->max_rate_tries times to affect how rate control updates
2054 * PER for the failed rate.
 2055		 * In case of congestion on the bus, penalizing this type of
 2056		 * underrun should help the hardware actually transmit new frames
2057 * successfully by eventually preferring slower rates.
2058 * This itself should also alleviate congestion on the bus.
2059 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002060 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2061 ATH9K_TX_DELIM_UNDERRUN)) &&
2062 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002063 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002064 tx_info->status.rates[tx_rateindex].count =
2065 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302066 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302067
Felix Fietkau545750d2009-11-23 22:21:01 +01002068 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302069 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002070 tx_info->status.rates[i].idx = -1;
2071 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302072
Felix Fietkau78c46532010-06-25 01:26:16 +02002073 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302074}
2075
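/*
 * Completion path shared by the legacy and EDMA tasklets: update the queue
 * depth counters, complete the buffer (aggregate or single frame) with the
 * txq lock dropped, and then reschedule the queue.
 */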
Felix Fietkaufce041b2011-05-19 12:20:25 +02002076static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2077 struct ath_tx_status *ts, struct ath_buf *bf,
2078 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302079 __releases(txq->axq_lock)
2080 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002081{
2082 int txok;
2083
2084 txq->axq_depth--;
2085 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2086 txq->axq_tx_inprogress = false;
2087 if (bf_is_ampdu_not_probing(bf))
2088 txq->axq_ampdu_depth--;
2089
2090 spin_unlock_bh(&txq->axq_lock);
2091
2092 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002093 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002094 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2095 } else
2096 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2097
2098 spin_lock_bh(&txq->axq_lock);
2099
2100 if (sc->sc_flags & SC_OP_TXAGGR)
2101 ath_txq_schedule(sc, txq);
2102}
2103
Sujithc4288392008-11-18 09:09:30 +05302104static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002105{
Sujithcbe61d82009-02-09 13:27:12 +05302106 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002107 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002108 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2109 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302110 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002111 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002112 int status;
2113
Joe Perches226afe62010-12-02 19:12:37 -08002114 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2115 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2116 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002117
Felix Fietkaufce041b2011-05-19 12:20:25 +02002118 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002119 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002120 if (work_pending(&sc->hw_reset_work))
2121 break;
2122
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002123 if (list_empty(&txq->axq_q)) {
2124 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002125 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002126 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127 break;
2128 }
2129 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2130
2131 /*
 2132		 * There is a race condition where a BH gets scheduled
 2133		 * after sw writes TxE and before hw re-loads the last
2134 * descriptor to get the newly chained one.
2135 * Software must keep the last DONE descriptor as a
2136 * holding descriptor - software does so by marking
2137 * it with the STALE flag.
2138 */
2139 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302140 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002142 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002143 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002144
2145 bf = list_entry(bf_held->list.next, struct ath_buf,
2146 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002147 }
2148
2149 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302150 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151
Felix Fietkau29bffa92010-03-29 20:14:23 -07002152 memset(&ts, 0, sizeof(ts));
2153 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002154 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002156
Ben Greear2dac4fb2011-01-09 23:11:45 -08002157 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002158
2159 /*
 2160		 * Remove the ath_bufs of the same transmit unit from the txq,
 2161		 * but leave the last descriptor behind as the holding
 2162		 * descriptor for hw.
2163 */
Sujitha119cc42009-03-30 15:28:38 +05302164 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002165 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166 if (!list_is_singular(&lastbf->list))
2167 list_cut_position(&bf_head,
2168 &txq->axq_q, lastbf->list.prev);
2169
Felix Fietkaufce041b2011-05-19 12:20:25 +02002170 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002171 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002172 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002173 }
Johannes Berge6a98542008-10-21 12:40:02 +02002174
Felix Fietkaufce041b2011-05-19 12:20:25 +02002175 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002176 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002177 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002178}
2179
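/*
 * Watchdog for stuck tx queues: if a queue has pending frames and saw no
 * completions between two consecutive polls, schedule a chip reset.
 */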
Sujith305fe472009-07-23 15:32:29 +05302180static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002181{
2182 struct ath_softc *sc = container_of(work, struct ath_softc,
2183 tx_complete_work.work);
2184 struct ath_txq *txq;
2185 int i;
2186 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002187#ifdef CONFIG_ATH9K_DEBUGFS
2188 sc->tx_complete_poll_work_seen++;
2189#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002190
2191 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2192 if (ATH_TXQ_SETUP(sc, i)) {
2193 txq = &sc->tx.txq[i];
2194 spin_lock_bh(&txq->axq_lock);
2195 if (txq->axq_depth) {
2196 if (txq->axq_tx_inprogress) {
2197 needreset = true;
2198 spin_unlock_bh(&txq->axq_lock);
2199 break;
2200 } else {
2201 txq->axq_tx_inprogress = true;
2202 }
2203 }
2204 spin_unlock_bh(&txq->axq_lock);
2205 }
2206
2207 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002208 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2209 "tx hung, resetting the chip\n");
Felix Fietkau030d6292011-10-07 02:28:13 +02002210 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
Felix Fietkau236de512011-09-03 01:40:25 +02002211 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002212 }
2213
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002214 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002215 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2216}
2217
2218
Sujithe8324352009-01-16 21:38:42 +05302219
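/* Process tx completions on every hardware queue flagged in the interrupt status. */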
2220void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002221{
Sujithe8324352009-01-16 21:38:42 +05302222 int i;
2223 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002224
Sujithe8324352009-01-16 21:38:42 +05302225 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002226
2227 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302228 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2229 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002230 }
2231}
2232
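/*
 * Tx completion handling for EDMA hardware, which reports completions
 * through a dedicated tx status ring rather than in the frame descriptors.
 */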
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002233void ath_tx_edma_tasklet(struct ath_softc *sc)
2234{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002235 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002236 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2237 struct ath_hw *ah = sc->sc_ah;
2238 struct ath_txq *txq;
2239 struct ath_buf *bf, *lastbf;
2240 struct list_head bf_head;
2241 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002242
2243 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002244 if (work_pending(&sc->hw_reset_work))
2245 break;
2246
Felix Fietkaufce041b2011-05-19 12:20:25 +02002247 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002248 if (status == -EINPROGRESS)
2249 break;
2250 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002251 ath_dbg(common, ATH_DBG_XMIT,
2252 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002253 break;
2254 }
2255
2256 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002257 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002258 continue;
2259
Felix Fietkaufce041b2011-05-19 12:20:25 +02002260 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002261
2262 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002263
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2265 spin_unlock_bh(&txq->axq_lock);
2266 return;
2267 }
2268
2269 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2270 struct ath_buf, list);
2271 lastbf = bf->bf_lastbf;
2272
2273 INIT_LIST_HEAD(&bf_head);
2274 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2275 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002276
Felix Fietkaufce041b2011-05-19 12:20:25 +02002277 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2278 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002279
Felix Fietkaufce041b2011-05-19 12:20:25 +02002280 if (!list_empty(&txq->axq_q)) {
2281 struct list_head bf_q;
2282
2283 INIT_LIST_HEAD(&bf_q);
2284 txq->axq_link = NULL;
2285 list_splice_tail_init(&txq->axq_q, &bf_q);
2286 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2287 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002288 }
2289
Felix Fietkaufce041b2011-05-19 12:20:25 +02002290 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002291 spin_unlock_bh(&txq->axq_lock);
2292 }
2293}
2294
Sujithe8324352009-01-16 21:38:42 +05302295/*****************/
2296/* Init, Cleanup */
2297/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002298
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002299static int ath_txstatus_setup(struct ath_softc *sc, int size)
2300{
2301 struct ath_descdma *dd = &sc->txsdma;
2302 u8 txs_len = sc->sc_ah->caps.txs_len;
2303
2304 dd->dd_desc_len = size * txs_len;
2305 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2306 &dd->dd_desc_paddr, GFP_KERNEL);
2307 if (!dd->dd_desc)
2308 return -ENOMEM;
2309
2310 return 0;
2311}
2312
2313static int ath_tx_edma_init(struct ath_softc *sc)
2314{
2315 int err;
2316
2317 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2318 if (!err)
2319 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2320 sc->txsdma.dd_desc_paddr,
2321 ATH_TXSTATUS_RING_SIZE);
2322
2323 return err;
2324}
2325
2326static void ath_tx_edma_cleanup(struct ath_softc *sc)
2327{
2328 struct ath_descdma *dd = &sc->txsdma;
2329
2330 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2331 dd->dd_desc_paddr);
2332}
2333
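/*
 * Allocate the tx and beacon descriptor pools, the EDMA tx status ring
 * when applicable, and prepare the tx completion watchdog work.
 */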
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002334int ath_tx_init(struct ath_softc *sc, int nbufs)
2335{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002336 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002337 int error = 0;
2338
Sujith797fe5cb2009-03-30 15:28:45 +05302339 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002340
Sujith797fe5cb2009-03-30 15:28:45 +05302341 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002342 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302343 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002344 ath_err(common,
2345 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302346 goto err;
2347 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002348
Sujith797fe5cb2009-03-30 15:28:45 +05302349 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002350 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302351 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002352 ath_err(common,
2353 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302354 goto err;
2355 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002356
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002357 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2358
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002359 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2360 error = ath_tx_edma_init(sc);
2361 if (error)
2362 goto err;
2363 }
2364
Sujith797fe5cb2009-03-30 15:28:45 +05302365err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366 if (error != 0)
2367 ath_tx_cleanup(sc);
2368
2369 return error;
2370}
2371
Sujith797fe5cb2009-03-30 15:28:45 +05302372void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002373{
Sujithb77f4832008-12-07 21:44:03 +05302374 if (sc->beacon.bdma.dd_desc_len != 0)
2375 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376
Sujithb77f4832008-12-07 21:44:03 +05302377 if (sc->tx.txdma.dd_desc_len != 0)
2378 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002379
2380 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2381 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002382}
2383
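/*
 * Initialize per-station aggregation state: one ath_atx_tid per TID with
 * an empty block-ack window and software queue, and one ath_atx_ac per
 * access category pointing at its hardware txq.
 */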
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2385{
Sujithc5170162008-10-29 10:13:59 +05302386 struct ath_atx_tid *tid;
2387 struct ath_atx_ac *ac;
2388 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389
Sujith8ee5afb2008-12-07 21:43:36 +05302390 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302391 tidno < WME_NUM_TID;
2392 tidno++, tid++) {
2393 tid->an = an;
2394 tid->tidno = tidno;
2395 tid->seq_start = tid->seq_next = 0;
2396 tid->baw_size = WME_MAX_BA;
2397 tid->baw_head = tid->baw_tail = 0;
2398 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302399 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302400 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002401 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302402 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302403 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302404 tid->state &= ~AGGR_ADDBA_COMPLETE;
2405 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302406 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407
Sujith8ee5afb2008-12-07 21:43:36 +05302408 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302409 acno < WME_NUM_AC; acno++, ac++) {
2410 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002411 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302412 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002413 }
2414}
2415
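/*
 * Tear down per-station tx state: unschedule all TIDs and ACs and drop any
 * frames still sitting in their software queues.
 */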
Sujithb5aa9bf2008-10-29 10:13:31 +05302416void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417{
Felix Fietkau2b409942010-07-07 19:42:08 +02002418 struct ath_atx_ac *ac;
2419 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002421 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302422
Felix Fietkau2b409942010-07-07 19:42:08 +02002423 for (tidno = 0, tid = &an->tid[tidno];
2424 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002425
Felix Fietkau2b409942010-07-07 19:42:08 +02002426 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002427 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428
Felix Fietkau2b409942010-07-07 19:42:08 +02002429 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002430
Felix Fietkau2b409942010-07-07 19:42:08 +02002431 if (tid->sched) {
2432 list_del(&tid->list);
2433 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002434 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002435
2436 if (ac->sched) {
2437 list_del(&ac->list);
2438 tid->ac->sched = false;
2439 }
2440
2441 ath_tid_drain(sc, txq, tid);
2442 tid->state &= ~AGGR_ADDBA_COMPLETE;
2443 tid->state &= ~AGGR_CLEANUP;
2444
2445 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002446 }
2447}