/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

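/*
 * Data bits carried per OFDM symbol for MCS 0-7 (single stream) at
 * 20 MHz and 40 MHz.  Multiplied by the stream count when computing
 * frame durations and minimum subframe lengths below.
 */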
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

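/*
 * Maximum frame length in bytes that fits in roughly 4 ms of airtime,
 * indexed by [HT mode][MCS 0-31] (rows of eight entries per stream
 * count).  Used by ath_lookup_rate() to cap the A-MPDU length.
 */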
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

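/*
 * Queue a TID on its access category and the access category on the
 * hardware queue's scheduling list, unless the TID is paused or
 * already scheduled.
 */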
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

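/*
 * Per-frame driver state is stashed in the rate_driver_data area of
 * the mac80211 tx info attached to the skb.
 */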
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

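/*
 * Drain the software queue of a TID: subframes that were already
 * retried are failed and removed from the block-ack window, anything
 * else is sent out as a normal (non-aggregate) frame.
 */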
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

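/*
 * Mark the given sequence number as completed and slide the block-ack
 * window start past any leading slots that have completed.
 */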
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

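/*
 * Mark a sequence number as pending in the block-ack window bitmap and
 * move the window tail forward if needed.
 */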
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

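/*
 * Grab a fresh ath_buf and copy the descriptor and state of an
 * existing one, so a frame held in a stale "holding" descriptor can be
 * retransmitted.
 */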
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

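/*
 * Walk the chain of subframes and count the total number of frames and
 * how many of them are not covered by the block-ack bitmap.
 */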
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

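/*
 * Process the tx status of an aggregate: subframes acknowledged by the
 * block-ack are completed and removed from the BAW, un-acked subframes
 * are either software-retried (requeued in order at the head of the
 * TID queue) or failed once ATH_MAX_SW_RETRIES is exceeded.  Rate
 * control is updated once per aggregate.
 */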
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it is not a
		 * holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

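/* Returns true if any rate in the frame's rate series is a legacy (non-MCS) rate. */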
static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

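/*
 * Compute the A-MPDU length limit for this frame: the smallest 4 ms
 * frame length across the rate series, further capped by the peer's
 * maximum A-MPDU size.  Returns 0 (no aggregation) for rate-control
 * probes and legacy rates.
 */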
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit the aggregate size by the minimum rate if the selected rate
	 * is not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add a delimiter when using RTS/CTS with aggregation
	 * and a non-enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. first rate) to determine
	 * the required minimum length for a subframe. Take into account
	 * whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

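/*
 * Pull frames off the TID's software queue and chain them into a
 * single aggregate.  Stops when the block-ack window would be
 * overstepped, the rate-derived length limit or the subframe limit
 * (half the BAW) is reached, or a rate-control probe frame is seen.
 */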
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
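/*
 * nsymbols = ceil((8 * pktlen + OFDM_PLCP_BITS) / (bits_per_symbol * streams)),
 * at 4 us per symbol (3.6 us with short GI), plus the legacy and HT
 * preamble/training fields.
 */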
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up the duration of the legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

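/*
 * Translate the mac80211 rate series into the hardware rate table:
 * tries, RTS/CTS and bandwidth flags, per-rate packet duration and
 * chainmask, plus the RTS/CTS protection rate.
 */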
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

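/*
 * Fill the hardware descriptors for every subframe in the chain,
 * linking each descriptor to the next and tagging the first, middle
 * and last positions of an aggregate.
 */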
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

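/*
 * Keep forming aggregates from the TID queue and handing them to the
 * hardware queue until it is deep enough or the block-ack window
 * closes.  A lone remaining frame is sent on its own, without
 * aggregation overhead.
 */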
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

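/*
 * ADDBA session setup: pause the TID, reset the block-ack window and
 * report the starting sequence number back through *ssn.
 */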
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

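/*
 * Called when a station enters powersave: take its TIDs off the
 * scheduling lists and report whether any frames remain buffered
 * for it.
 */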
bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

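/*
 * Allocate a hardware transmit queue of the requested type and
 * initialize the corresponding ath_txq software state.  Returns NULL
 * if the hardware has no queue to spare.
 */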
1252struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1253{
Sujithcbe61d82009-02-09 13:27:12 +05301254 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001255 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +05301256 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001257 static const int subtype_txq_to_hwq[] = {
1258 [WME_AC_BE] = ATH_TXQ_AC_BE,
1259 [WME_AC_BK] = ATH_TXQ_AC_BK,
1260 [WME_AC_VI] = ATH_TXQ_AC_VI,
1261 [WME_AC_VO] = ATH_TXQ_AC_VO,
1262 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001263 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301264
1265 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001266 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301267 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1268 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1269 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1270 qi.tqi_physCompBuf = 0;
1271
1272 /*
1273 * Enable interrupts only for EOL and DESC conditions.
1274 * We mark tx descriptors to receive a DESC interrupt
 1275	 * when a tx queue gets deep; otherwise we wait for the
 1276	 * EOL interrupt to reap descriptors. Note that this is done to
1277 * reduce interrupt load and this only defers reaping
1278 * descriptors, never transmitting frames. Aside from
1279 * reducing interrupts this also permits more concurrency.
1280 * The only potential downside is if the tx queue backs
 1281	 * up, in which case the top half of the kernel may back up
1282 * due to a lack of tx descriptors.
1283 *
1284 * The UAPSD queue is an exception, since we take a desc-
1285 * based intr on the EOSP frames.
1286 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001287 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1288 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1289 TXQ_FLAG_TXERRINT_ENABLE;
1290 } else {
1291 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1292 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1293 else
1294 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1295 TXQ_FLAG_TXDESCINT_ENABLE;
1296 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001297 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1298 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301299 /*
1300 * NB: don't print a message, this happens
1301 * normally on parts with too few tx queues
1302 */
1303 return NULL;
1304 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001305 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -08001306 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -08001307 axq_qnum, ARRAY_SIZE(sc->tx.txq));
1308 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301309 return NULL;
1310 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001311 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1312 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301313
Ben Greear60f2d1d2011-01-09 23:11:52 -08001314 txq->axq_qnum = axq_qnum;
1315 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301316 txq->axq_link = NULL;
1317 INIT_LIST_HEAD(&txq->axq_q);
1318 INIT_LIST_HEAD(&txq->axq_acq);
1319 spin_lock_init(&txq->axq_lock);
1320 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001321 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001322 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001323 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001324
1325 txq->txq_headidx = txq->txq_tailidx = 0;
1326 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1327 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301328 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001329 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301330}
1331
Sujithe8324352009-01-16 21:38:42 +05301332int ath_txq_update(struct ath_softc *sc, int qnum,
1333 struct ath9k_tx_queue_info *qinfo)
1334{
Sujithcbe61d82009-02-09 13:27:12 +05301335 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301336 int error = 0;
1337 struct ath9k_tx_queue_info qi;
1338
1339 if (qnum == sc->beacon.beaconq) {
1340 /*
1341 * XXX: for beacon queue, we just save the parameter.
1342 * It will be picked up by ath_beaconq_config when
1343 * it's necessary.
1344 */
1345 sc->beacon.beacon_qi = *qinfo;
1346 return 0;
1347 }
1348
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001349 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301350
1351 ath9k_hw_get_txq_props(ah, qnum, &qi);
1352 qi.tqi_aifs = qinfo->tqi_aifs;
1353 qi.tqi_cwmin = qinfo->tqi_cwmin;
1354 qi.tqi_cwmax = qinfo->tqi_cwmax;
1355 qi.tqi_burstTime = qinfo->tqi_burstTime;
1356 qi.tqi_readyTime = qinfo->tqi_readyTime;
1357
1358 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001359 ath_err(ath9k_hw_common(sc->sc_ah),
1360 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301361 error = -EIO;
1362 } else {
1363 ath9k_hw_resettxqueue(ah, qnum);
1364 }
1365
1366 return error;
1367}
1368
1369int ath_cabq_update(struct ath_softc *sc)
1370{
1371 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001372 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301373 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301374
1375 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1376 /*
1377 * Ensure the readytime % is within the bounds.
1378 */
Sujith17d79042009-02-09 13:27:03 +05301379 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1380 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1381 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1382 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301383
Steve Brown9814f6b2011-02-07 17:10:39 -07001384 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301385 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301386 ath_txq_update(sc, qnum, &qi);
1387
1388 return 0;
1389}
1390
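/* A-MPDU frames that are not rate-control probes count towards axq_ampdu_depth. */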
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001391static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1392{
1393 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1394 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1395}
1396
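/*
 * Complete (as failed) every frame on the given descriptor list, returning
 * stale holding descriptors to the free list. The queue lock is dropped
 * around the completion calls.
 */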
Felix Fietkaufce041b2011-05-19 12:20:25 +02001397static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1398 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301399 __releases(txq->axq_lock)
1400 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301401{
1402 struct ath_buf *bf, *lastbf;
1403 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001404 struct ath_tx_status ts;
1405
1406 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301407 INIT_LIST_HEAD(&bf_head);
1408
Felix Fietkaufce041b2011-05-19 12:20:25 +02001409 while (!list_empty(list)) {
1410 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301411
Felix Fietkaufce041b2011-05-19 12:20:25 +02001412 if (bf->bf_stale) {
1413 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301414
Felix Fietkaufce041b2011-05-19 12:20:25 +02001415 ath_tx_return_buffer(sc, bf);
1416 continue;
Sujithe8324352009-01-16 21:38:42 +05301417 }
1418
1419 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001420 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001421
Sujithe8324352009-01-16 21:38:42 +05301422 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001423 if (bf_is_ampdu_not_probing(bf))
1424 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301425
Felix Fietkaufce041b2011-05-19 12:20:25 +02001426 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301427 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001428 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1429 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301430 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001431 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001432 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001433 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001434}
1435
1436/*
1437 * Drain a given TX queue (could be Beacon or Data)
1438 *
1439 * This assumes output has been stopped and
1440 * we do not need to block ath_tx_tasklet.
1441 */
1442void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1443{
1444 spin_lock_bh(&txq->axq_lock);
1445 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1446 int idx = txq->txq_tailidx;
1447
1448 while (!list_empty(&txq->txq_fifo[idx])) {
1449 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1450 retry_tx);
1451
1452 INCR(idx, ATH_TXFIFO_DEPTH);
1453 }
1454 txq->txq_tailidx = idx;
1455 }
1456
1457 txq->axq_link = NULL;
1458 txq->axq_tx_inprogress = false;
1459 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001460
1461 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001462 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1463 ath_txq_drain_pending_buffers(sc, txq);
1464
1465 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301466}
1467
Felix Fietkau080e1a22010-12-05 20:17:53 +01001468bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301469{
Sujithcbe61d82009-02-09 13:27:12 +05301470 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001471 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301472 struct ath_txq *txq;
1473 int i, npend = 0;
1474
1475 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001476 return true;
Sujith043a0402009-01-16 21:38:47 +05301477
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001478 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301479
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001480 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301481 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001482 if (!ATH_TXQ_SETUP(sc, i))
1483 continue;
1484
1485 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301486 }
1487
Felix Fietkau080e1a22010-12-05 20:17:53 +01001488 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001489 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301490
1491 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001492 if (!ATH_TXQ_SETUP(sc, i))
1493 continue;
1494
1495 /*
1496 * The caller will resume queues with ieee80211_wake_queues.
1497 * Mark the queue as not stopped to prevent ath_tx_complete
1498 * from waking the queue too early.
1499 */
1500 txq = &sc->tx.txq[i];
1501 txq->stopped = false;
1502 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301503 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001504
1505 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301506}
1507
Sujithe8324352009-01-16 21:38:42 +05301508void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1509{
1510 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1511 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1512}
1513
Ben Greear7755bad2011-01-18 17:30:00 -08001514/* For each axq_acq entry, for each tid, try to schedule packets
 1515 * for transmission until axq_ampdu_depth has reached the minimum queue depth.
1516 */
Sujithe8324352009-01-16 21:38:42 +05301517void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1518{
Ben Greear7755bad2011-01-18 17:30:00 -08001519 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1520 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301521
Felix Fietkau236de512011-09-03 01:40:25 +02001522 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001523 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301524 return;
1525
1526 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001527 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301528
Ben Greear7755bad2011-01-18 17:30:00 -08001529 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1530 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1531 list_del(&ac->list);
1532 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301533
Ben Greear7755bad2011-01-18 17:30:00 -08001534 while (!list_empty(&ac->tid_q)) {
1535 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1536 list);
1537 list_del(&tid->list);
1538 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301539
Ben Greear7755bad2011-01-18 17:30:00 -08001540 if (tid->paused)
1541 continue;
Sujithe8324352009-01-16 21:38:42 +05301542
Ben Greear7755bad2011-01-18 17:30:00 -08001543 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301544
Ben Greear7755bad2011-01-18 17:30:00 -08001545 /*
1546 * add tid to round-robin queue if more frames
1547 * are pending for the tid
1548 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001549 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001550 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301551
Ben Greear7755bad2011-01-18 17:30:00 -08001552 if (tid == last_tid ||
1553 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1554 break;
Sujithe8324352009-01-16 21:38:42 +05301555 }
Ben Greear7755bad2011-01-18 17:30:00 -08001556
1557 if (!list_empty(&ac->tid_q)) {
1558 if (!ac->sched) {
1559 ac->sched = true;
1560 list_add_tail(&ac->list, &txq->axq_acq);
1561 }
1562 }
1563
1564 if (ac == last_ac ||
1565 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1566 return;
Sujithe8324352009-01-16 21:38:42 +05301567 }
1568}
1569
Sujithe8324352009-01-16 21:38:42 +05301570/***********/
1571/* TX, DMA */
1572/***********/
1573
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001574/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001575 * Insert a chain of ath_buf (descriptors) on a txq and
 1576 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001577 */
Sujith102e0572008-10-29 10:15:16 +05301578static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001579 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580{
Sujithcbe61d82009-02-09 13:27:12 +05301581 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001582 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001583 struct ath_buf *bf, *bf_last;
1584 bool puttxbuf = false;
1585 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301586
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001587 /*
1588 * Insert the frame on the outbound list and
1589 * pass it on to the hardware.
1590 */
1591
1592 if (list_empty(head))
1593 return;
1594
Felix Fietkaufce041b2011-05-19 12:20:25 +02001595 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001596 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001597 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001598
Joe Perches226afe62010-12-02 19:12:37 -08001599 ath_dbg(common, ATH_DBG_QUEUE,
1600 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001601
Felix Fietkaufce041b2011-05-19 12:20:25 +02001602 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1603 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001604 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001605 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001606 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001607 list_splice_tail_init(head, &txq->axq_q);
1608
Felix Fietkaufce041b2011-05-19 12:20:25 +02001609 if (txq->axq_link) {
1610 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001611 ath_dbg(common, ATH_DBG_XMIT,
1612 "link[%u] (%p)=%llx (%p)\n",
1613 txq->axq_qnum, txq->axq_link,
1614 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001615 } else if (!edma)
1616 puttxbuf = true;
1617
1618 txq->axq_link = bf_last->bf_desc;
1619 }
1620
1621 if (puttxbuf) {
1622 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1623 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1624 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1625 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1626 }
1627
1628 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001629 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001630 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001631 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001632
1633 if (!internal) {
1634 txq->axq_depth++;
1635 if (bf_is_ampdu_not_probing(bf))
1636 txq->axq_ampdu_depth++;
1637 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001638}
1639
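/*
 * Queue an MPDU on a TID that has aggregation enabled: either buffer it
 * in the software queue or push it to the hardware directly if the BAW
 * and queue depth allow it.
 */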
Sujithe8324352009-01-16 21:38:42 +05301640static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001641 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301642{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001643 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001644 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001645 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301646
1647 /*
1648 * Do not queue to h/w when any of the following conditions is true:
1649 * - there are pending frames in software queue
1650 * - the TID is currently paused for ADDBA/BAR request
1651 * - seqno is not within block-ack window
1652 * - h/w queue depth exceeds low water mark
1653 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001654 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001655 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001656 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001657 /*
Sujithe8324352009-01-16 21:38:42 +05301658 * Add this frame to software queue for scheduling later
1659 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001660 */
Ben Greearbda8add2011-01-09 23:11:48 -08001661 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001662 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001663 if (!txctl->an || !txctl->an->sleeping)
1664 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301665 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001666 }
1667
Felix Fietkau44f1d262011-08-28 00:32:25 +02001668 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1669 if (!bf)
1670 return;
1671
Felix Fietkau399c6482011-09-14 21:24:17 +02001672 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001673 INIT_LIST_HEAD(&bf_head);
1674 list_add(&bf->list, &bf_head);
1675
Sujithe8324352009-01-16 21:38:42 +05301676 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001677 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301678
1679 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001680 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301681 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001682 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001683 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301684}
1685
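/* Send a frame to the hardware without aggregation. */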
Felix Fietkau82b873a2010-11-11 03:18:37 +01001686static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001687 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001688{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001689 struct ath_frame_info *fi = get_frame_info(skb);
1690 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301691 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001692
Felix Fietkau44f1d262011-08-28 00:32:25 +02001693 bf = fi->bf;
1694 if (!bf)
1695 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1696
1697 if (!bf)
1698 return;
1699
1700 INIT_LIST_HEAD(&bf_head);
1701 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001702 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301703
1704 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001705 if (tid)
1706 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301707
Sujithd43f30152009-01-16 21:38:53 +05301708 bf->bf_lastbf = bf;
Felix Fietkau493cf042011-09-14 21:24:22 +02001709 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001710 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301711 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001712}
1713
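/*
 * Cache the key index, key type and frame length in the per-frame
 * ath_frame_info attached to the skb.
 */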
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001714static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1715 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301716{
1717 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001718 struct ieee80211_sta *sta = tx_info->control.sta;
1719 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001720 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001721 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001722 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001723 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301724
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001725 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301726
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001727 if (sta)
1728 an = (struct ath_node *) sta->drv_priv;
1729
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001730 memset(fi, 0, sizeof(*fi));
1731 if (hw_key)
1732 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001733 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1734 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001735 else
1736 fi->keyix = ATH9K_TXKEYIX_INVALID;
1737 fi->keytype = keytype;
1738 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301739}
1740
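/*
 * On chips with the APM capability, drop from three TX chains to two
 * for lower rates on 5 GHz channels.
 */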
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301741u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1742{
1743 struct ath_hw *ah = sc->sc_ah;
1744 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301745 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1746 (curchan->channelFlags & CHANNEL_5GHZ) &&
1747 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301748 return 0x3;
1749 else
1750 return chainmask;
1751}
1752
Felix Fietkau44f1d262011-08-28 00:32:25 +02001753/*
 1754 * Assign a descriptor (and a sequence number if necessary)
 1755 * and map the buffer for DMA. Frees the skb on error.
1756 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001757static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001758 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001759 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001760 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301761{
Felix Fietkau82b873a2010-11-11 03:18:37 +01001762 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001763 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001764 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001765 struct ath_buf *bf;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001766 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001767
1768 bf = ath_tx_get_buffer(sc);
1769 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001770 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001771 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001772 }
Sujithe8324352009-01-16 21:38:42 +05301773
Sujithe8324352009-01-16 21:38:42 +05301774 ATH_TXBUF_RESET(bf);
1775
Felix Fietkaufa05f872011-08-28 00:32:24 +02001776 if (tid) {
1777 seqno = tid->seq_next;
1778 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1779 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1780 bf->bf_state.seqno = seqno;
1781 }
1782
Sujithe8324352009-01-16 21:38:42 +05301783 bf->bf_mpdu = skb;
1784
Ben Greearc1739eb32010-10-14 12:45:29 -07001785 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1786 skb->len, DMA_TO_DEVICE);
1787 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301788 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001789 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001790 ath_err(ath9k_hw_common(sc->sc_ah),
1791 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001792 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001793 goto error;
Sujithe8324352009-01-16 21:38:42 +05301794 }
1795
Felix Fietkau56dc6332011-08-28 00:32:22 +02001796 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001797
1798 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001799
1800error:
1801 dev_kfree_skb_any(skb);
1802 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001803}
1804
1805/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001806static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001807 struct ath_tx_control *txctl)
1808{
Felix Fietkau04caf862010-11-14 15:20:12 +01001809 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1810 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001811 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001812 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001813 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301814
Sujithe8324352009-01-16 21:38:42 +05301815 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301816 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1817 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001818 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1819 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001820 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001821
Felix Fietkau066dae92010-11-07 14:59:39 +01001822 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001823 }
1824
1825 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001826 /*
1827 * Try aggregation if it's a unicast data frame
1828 * and the destination is HT capable.
1829 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001830 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301831 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001832 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1833 if (!bf)
1834 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001835
Felix Fietkau82b873a2010-11-11 03:18:37 +01001836 bf->bf_state.bfs_paprd = txctl->paprd;
1837
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301838 if (txctl->paprd)
1839 bf->bf_state.bfs_paprd_timestamp = jiffies;
1840
Felix Fietkau44f1d262011-08-28 00:32:25 +02001841 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301842 }
1843
Felix Fietkaufa05f872011-08-28 00:32:24 +02001844out:
Sujithe8324352009-01-16 21:38:42 +05301845 spin_unlock_bh(&txctl->txq->axq_lock);
1846}
1847
1848/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001849int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301850 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001851{
Felix Fietkau28d16702010-11-14 15:20:10 +01001852 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1853 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001854 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001855 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001856 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001857 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001858 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001859 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001860 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001861
Ben Greeara9927ba2010-12-06 21:13:49 -08001862 /* NOTE: sta can be NULL according to net/mac80211.h */
1863 if (sta)
1864 txctl->an = (struct ath_node *)sta->drv_priv;
1865
Felix Fietkau04caf862010-11-14 15:20:12 +01001866 if (info->control.hw_key)
1867 frmlen += info->control.hw_key->icv_len;
1868
Felix Fietkau28d16702010-11-14 15:20:10 +01001869 /*
1870 * As a temporary workaround, assign seq# here; this will likely need
1871 * to be cleaned up to work better with Beacon transmission and virtual
1872 * BSSes.
1873 */
1874 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1875 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1876 sc->tx.seq_no += 0x10;
1877 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1878 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1879 }
1880
1881 /* Add the padding after the header if this is not already done */
1882 padpos = ath9k_cmn_padpos(hdr->frame_control);
1883 padsize = padpos & 3;
1884 if (padsize && skb->len > padpos) {
1885 if (skb_headroom(skb) < padsize)
1886 return -ENOMEM;
1887
1888 skb_push(skb, padsize);
1889 memmove(skb->data, skb->data + padsize, padpos);
1890 }
1891
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001892 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1893 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1894 !ieee80211_is_data(hdr->frame_control))
1895 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1896
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001897 setup_frame_info(hw, skb, frmlen);
1898
1899 /*
1900 * At this point, the vif, hw_key and sta pointers in the tx control
 1901	 * info are no longer valid (overwritten by the ath_frame_info data).
1902 */
1903
Felix Fietkau066dae92010-11-07 14:59:39 +01001904 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001905 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001906 if (txq == sc->tx.txq_map[q] &&
1907 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001908 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001909 txq->stopped = 1;
1910 }
1911 spin_unlock_bh(&txq->axq_lock);
1912
Felix Fietkau44f1d262011-08-28 00:32:25 +02001913 ath_tx_start_dma(sc, skb, txctl);
1914 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001915}
1916
Sujithe8324352009-01-16 21:38:42 +05301917/*****************/
1918/* TX Completion */
1919/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001920
Sujithe8324352009-01-16 21:38:42 +05301921static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301922 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001923{
Sujithe8324352009-01-16 21:38:42 +05301924 struct ieee80211_hw *hw = sc->hw;
1925 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001926 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001927 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001928 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301929
Joe Perches226afe62010-12-02 19:12:37 -08001930 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301931
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301932 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301933 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301934
Felix Fietkau55797b12011-09-14 21:24:16 +02001935 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301936 /* Frame was ACKed */
1937 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301938
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001939 padpos = ath9k_cmn_padpos(hdr->frame_control);
1940 padsize = padpos & 3;
1941 if (padsize && skb->len>padpos+padsize) {
Sujithe8324352009-01-16 21:38:42 +05301942 /*
1943 * Remove MAC header padding before giving the frame back to
1944 * mac80211.
1945 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001946 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301947 skb_pull(skb, padsize);
1948 }
1949
Sujith1b04b932010-01-08 10:36:05 +05301950 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1951 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001952 ath_dbg(common, ATH_DBG_PS,
1953 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301954 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1955 PS_WAIT_FOR_CAB |
1956 PS_WAIT_FOR_PSPOLL_DATA |
1957 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001958 }
1959
Felix Fietkau7545daf2011-01-24 19:23:16 +01001960 q = skb_get_queue_mapping(skb);
1961 if (txq == sc->tx.txq_map[q]) {
1962 spin_lock_bh(&txq->axq_lock);
1963 if (WARN_ON(--txq->pending_frames < 0))
1964 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001965
Felix Fietkau7545daf2011-01-24 19:23:16 +01001966 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1967 ieee80211_wake_queue(sc->hw, q);
1968 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001969 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001970 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001971 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001972
1973 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301974}
1975
1976static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001977 struct ath_txq *txq, struct list_head *bf_q,
1978 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301979{
1980 struct sk_buff *skb = bf->bf_mpdu;
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001981 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithe8324352009-01-16 21:38:42 +05301982 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301983 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301984
Sujithe8324352009-01-16 21:38:42 +05301985 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301986 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301987
Felix Fietkau55797b12011-09-14 21:24:16 +02001988 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301989 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301990
Felix Fietkau3afd21e2011-09-14 21:24:26 +02001991 if (ts->ts_status & ATH9K_TXERR_FILT)
1992 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1993
Ben Greearc1739eb32010-10-14 12:45:29 -07001994 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001995 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001996
1997 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301998 if (time_after(jiffies,
1999 bf->bf_state.bfs_paprd_timestamp +
2000 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002001 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002002 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002003 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002004 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002005 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302006 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002007 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002008 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2009 * accidentally reference it later.
2010 */
2011 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302012
2013 /*
 2014	 * Return the ath_buf list of this mpdu to the free queue
2015 */
2016 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2017 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2018 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2019}
2020
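/*
 * Fill in the mac80211 TX status for rate control: ACK RSSI, A-MPDU
 * length/ack counts and adjusted retry counts.
 */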
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002021static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2022 struct ath_tx_status *ts, int nframes, int nbad,
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002023 int txok)
Sujithc4288392008-11-18 09:09:30 +05302024{
Sujitha22be222009-03-30 15:28:36 +05302025 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302026 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302027 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002028 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002029 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302030 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302031
Sujith95e4acb2009-03-13 08:56:09 +05302032 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002033 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302034
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002035 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302036 WARN_ON(tx_rateindex >= hw->max_rates);
2037
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002038 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002039 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302040
Felix Fietkaub572d032010-11-14 15:20:07 +01002041 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002042
Felix Fietkaub572d032010-11-14 15:20:07 +01002043 tx_info->status.ampdu_len = nframes;
2044 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002045 }
2046
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002047 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002048 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002049 /*
2050 * If an underrun error is seen assume it as an excessive
2051 * retry only if max frame trigger level has been reached
2052 * (2 KB for single stream, and 4 KB for dual stream).
2053 * Adjust the long retry as if the frame was tried
2054 * hw->max_rate_tries times to affect how rate control updates
2055 * PER for the failed rate.
 2056		 * In case of congestion on the bus, penalizing this type of
 2057		 * underrun should help the hardware actually transmit new frames
2058 * successfully by eventually preferring slower rates.
2059 * This itself should also alleviate congestion on the bus.
2060 */
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002061 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2062 ATH9K_TX_DELIM_UNDERRUN)) &&
2063 ieee80211_is_data(hdr->frame_control) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002064 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002065 tx_info->status.rates[tx_rateindex].count =
2066 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302067 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302068
Felix Fietkau545750d2009-11-23 22:21:01 +01002069 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302070 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002071 tx_info->status.rates[i].idx = -1;
2072 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302073
Felix Fietkau78c46532010-06-25 01:26:16 +02002074 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302075}
2076
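/*
 * Complete a single (possibly aggregate) frame, updating the queue depth
 * counters and kicking the scheduler afterwards. Drops and re-takes the
 * queue lock around the completion handlers.
 */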
Felix Fietkaufce041b2011-05-19 12:20:25 +02002077static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2078 struct ath_tx_status *ts, struct ath_buf *bf,
2079 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302080 __releases(txq->axq_lock)
2081 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002082{
2083 int txok;
2084
2085 txq->axq_depth--;
2086 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2087 txq->axq_tx_inprogress = false;
2088 if (bf_is_ampdu_not_probing(bf))
2089 txq->axq_ampdu_depth--;
2090
2091 spin_unlock_bh(&txq->axq_lock);
2092
2093 if (!bf_isampdu(bf)) {
Felix Fietkau3afd21e2011-09-14 21:24:26 +02002094 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002095 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2096 } else
2097 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2098
2099 spin_lock_bh(&txq->axq_lock);
2100
2101 if (sc->sc_flags & SC_OP_TXAGGR)
2102 ath_txq_schedule(sc, txq);
2103}
2104
Sujithc4288392008-11-18 09:09:30 +05302105static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002106{
Sujithcbe61d82009-02-09 13:27:12 +05302107 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002108 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002109 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2110 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302111 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002112 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002113 int status;
2114
Joe Perches226afe62010-12-02 19:12:37 -08002115 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2116 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2117 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002118
Felix Fietkaufce041b2011-05-19 12:20:25 +02002119 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002120 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002121 if (work_pending(&sc->hw_reset_work))
2122 break;
2123
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002124 if (list_empty(&txq->axq_q)) {
2125 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002126 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002127 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002128 break;
2129 }
2130 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2131
2132 /*
 2133		 * There is a race condition in which a BH gets scheduled
 2134		 * after sw writes TxE and before hw re-loads the last
2135 * descriptor to get the newly chained one.
2136 * Software must keep the last DONE descriptor as a
2137 * holding descriptor - software does so by marking
2138 * it with the STALE flag.
2139 */
2140 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302141 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002142 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002143 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002144 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002145
2146 bf = list_entry(bf_held->list.next, struct ath_buf,
2147 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148 }
2149
2150 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302151 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002152
Felix Fietkau29bffa92010-03-29 20:14:23 -07002153 memset(&ts, 0, sizeof(ts));
2154 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002155 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002156 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002157
Ben Greear2dac4fb2011-01-09 23:11:45 -08002158 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159
2160 /*
2161 * Remove ath_buf's of the same transmit unit from txq,
2162 * however leave the last descriptor back as the holding
2163 * descriptor for hw.
2164 */
Sujitha119cc42009-03-30 15:28:38 +05302165 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002166 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002167 if (!list_is_singular(&lastbf->list))
2168 list_cut_position(&bf_head,
2169 &txq->axq_q, lastbf->list.prev);
2170
Felix Fietkaufce041b2011-05-19 12:20:25 +02002171 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002172 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002173 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174 }
Johannes Berge6a98542008-10-21 12:40:02 +02002175
Felix Fietkaufce041b2011-05-19 12:20:25 +02002176 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002178 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002179}
2180
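/*
 * Periodic watchdog: if a queue has had frames pending with no completion
 * since the last poll, assume the hardware is hung and schedule a chip
 * reset.
 */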
Sujith305fe472009-07-23 15:32:29 +05302181static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002182{
2183 struct ath_softc *sc = container_of(work, struct ath_softc,
2184 tx_complete_work.work);
2185 struct ath_txq *txq;
2186 int i;
2187 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002188#ifdef CONFIG_ATH9K_DEBUGFS
2189 sc->tx_complete_poll_work_seen++;
2190#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002191
2192 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2193 if (ATH_TXQ_SETUP(sc, i)) {
2194 txq = &sc->tx.txq[i];
2195 spin_lock_bh(&txq->axq_lock);
2196 if (txq->axq_depth) {
2197 if (txq->axq_tx_inprogress) {
2198 needreset = true;
2199 spin_unlock_bh(&txq->axq_lock);
2200 break;
2201 } else {
2202 txq->axq_tx_inprogress = true;
2203 }
2204 }
2205 spin_unlock_bh(&txq->axq_lock);
2206 }
2207
2208 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002209 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2210 "tx hung, resetting the chip\n");
Felix Fietkau236de512011-09-03 01:40:25 +02002211 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002212 }
2213
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002214 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002215 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2216}
2217
2218
Sujithe8324352009-01-16 21:38:42 +05302219
2220void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002221{
Sujithe8324352009-01-16 21:38:42 +05302222 int i;
2223 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002224
Sujithe8324352009-01-16 21:38:42 +05302225 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002226
2227 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302228 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2229 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002230 }
2231}
2232
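/*
 * TX completion handler for EDMA-capable hardware, which reports
 * completions through a separate TX status ring rather than in the
 * transmit descriptors themselves.
 */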
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002233void ath_tx_edma_tasklet(struct ath_softc *sc)
2234{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002235 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002236 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2237 struct ath_hw *ah = sc->sc_ah;
2238 struct ath_txq *txq;
2239 struct ath_buf *bf, *lastbf;
2240 struct list_head bf_head;
2241 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002242
2243 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002244 if (work_pending(&sc->hw_reset_work))
2245 break;
2246
Felix Fietkaufce041b2011-05-19 12:20:25 +02002247 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002248 if (status == -EINPROGRESS)
2249 break;
2250 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002251 ath_dbg(common, ATH_DBG_XMIT,
2252 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002253 break;
2254 }
2255
2256 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002257 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002258 continue;
2259
Felix Fietkaufce041b2011-05-19 12:20:25 +02002260 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002261
2262 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002263
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2265 spin_unlock_bh(&txq->axq_lock);
2266 return;
2267 }
2268
2269 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2270 struct ath_buf, list);
2271 lastbf = bf->bf_lastbf;
2272
2273 INIT_LIST_HEAD(&bf_head);
2274 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2275 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002276
Felix Fietkaufce041b2011-05-19 12:20:25 +02002277 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2278 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002279
Felix Fietkaufce041b2011-05-19 12:20:25 +02002280 if (!list_empty(&txq->axq_q)) {
2281 struct list_head bf_q;
2282
2283 INIT_LIST_HEAD(&bf_q);
2284 txq->axq_link = NULL;
2285 list_splice_tail_init(&txq->axq_q, &bf_q);
2286 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2287 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002288 }
2289
Felix Fietkaufce041b2011-05-19 12:20:25 +02002290 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002291 spin_unlock_bh(&txq->axq_lock);
2292 }
2293}
2294
Sujithe8324352009-01-16 21:38:42 +05302295/*****************/
2296/* Init, Cleanup */
2297/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002298
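/* Allocate DMA-coherent memory for the EDMA TX status ring. */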
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002299static int ath_txstatus_setup(struct ath_softc *sc, int size)
2300{
2301 struct ath_descdma *dd = &sc->txsdma;
2302 u8 txs_len = sc->sc_ah->caps.txs_len;
2303
2304 dd->dd_desc_len = size * txs_len;
2305 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2306 &dd->dd_desc_paddr, GFP_KERNEL);
2307 if (!dd->dd_desc)
2308 return -ENOMEM;
2309
2310 return 0;
2311}
2312
2313static int ath_tx_edma_init(struct ath_softc *sc)
2314{
2315 int err;
2316
2317 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2318 if (!err)
2319 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2320 sc->txsdma.dd_desc_paddr,
2321 ATH_TXSTATUS_RING_SIZE);
2322
2323 return err;
2324}
2325
2326static void ath_tx_edma_cleanup(struct ath_softc *sc)
2327{
2328 struct ath_descdma *dd = &sc->txsdma;
2329
2330 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2331 dd->dd_desc_paddr);
2332}
2333
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002334int ath_tx_init(struct ath_softc *sc, int nbufs)
2335{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002336 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002337 int error = 0;
2338
Sujith797fe5cb2009-03-30 15:28:45 +05302339 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002340
Sujith797fe5cb2009-03-30 15:28:45 +05302341 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002342 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302343 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002344 ath_err(common,
2345 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302346 goto err;
2347 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002348
Sujith797fe5cb2009-03-30 15:28:45 +05302349 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002350 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302351 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002352 ath_err(common,
2353 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302354 goto err;
2355 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002356
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002357 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2358
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002359 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2360 error = ath_tx_edma_init(sc);
2361 if (error)
2362 goto err;
2363 }
2364
Sujith797fe5cb2009-03-30 15:28:45 +05302365err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366 if (error != 0)
2367 ath_tx_cleanup(sc);
2368
2369 return error;
2370}
2371
Sujith797fe5cb2009-03-30 15:28:45 +05302372void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002373{
Sujithb77f4832008-12-07 21:44:03 +05302374 if (sc->beacon.bdma.dd_desc_len != 0)
2375 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376
Sujithb77f4832008-12-07 21:44:03 +05302377 if (sc->tx.txdma.dd_desc_len != 0)
2378 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002379
2380 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2381 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002382}
2383
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002384void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2385{
Sujithc5170162008-10-29 10:13:59 +05302386 struct ath_atx_tid *tid;
2387 struct ath_atx_ac *ac;
2388 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389
Sujith8ee5afb2008-12-07 21:43:36 +05302390 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302391 tidno < WME_NUM_TID;
2392 tidno++, tid++) {
2393 tid->an = an;
2394 tid->tidno = tidno;
2395 tid->seq_start = tid->seq_next = 0;
2396 tid->baw_size = WME_MAX_BA;
2397 tid->baw_head = tid->baw_tail = 0;
2398 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302399 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302400 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002401 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302402 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302403 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302404 tid->state &= ~AGGR_ADDBA_COMPLETE;
2405 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302406 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407
Sujith8ee5afb2008-12-07 21:43:36 +05302408 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302409 acno < WME_NUM_AC; acno++, ac++) {
2410 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002411 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302412 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002413 }
2414}
2415
Sujithb5aa9bf2008-10-29 10:13:31 +05302416void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417{
Felix Fietkau2b409942010-07-07 19:42:08 +02002418 struct ath_atx_ac *ac;
2419 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002421 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302422
Felix Fietkau2b409942010-07-07 19:42:08 +02002423 for (tidno = 0, tid = &an->tid[tidno];
2424 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002425
Felix Fietkau2b409942010-07-07 19:42:08 +02002426 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002427 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002428
Felix Fietkau2b409942010-07-07 19:42:08 +02002429 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002430
Felix Fietkau2b409942010-07-07 19:42:08 +02002431 if (tid->sched) {
2432 list_del(&tid->list);
2433 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002434 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002435
2436 if (ac->sched) {
2437 list_del(&ac->list);
2438 tid->ac->sched = false;
2439 }
2440
2441 ath_tid_drain(sc, txq, tid);
2442 tid->state &= ~AGGR_ADDBA_COMPLETE;
2443 tid->state &= ~AGGR_CLEANUP;
2444
2445 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002446 }
2447}