/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2)		/* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
};
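
/*
 * Editorial note (illustrative, not part of the original source): the table
 * above holds the data bits carried per OFDM symbol for a single spatial
 * stream at each MCS (indexed modulo 8); multi-stream rates are handled by
 * multiplying with HT_RC_2_STREAMS() at the points of use.  For example,
 * MCS 7 at 40 MHz carries 540 bits per 4 us symbol, i.e. roughly
 * 135 Mbit/s of data-field throughput per stream.
 */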

#define IS_HT_RATE(_rate)	((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};
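
/*
 * Editorial note (illustrative, not part of the original source): each row
 * above is indexed by MCS (0-31) and holds the largest frame length, in
 * bytes, that fits in roughly a 4 ms transmit duration at that rate, capped
 * at 65532.  For example, MCS 7 / HT20 / long GI is about 65 Mbit/s, and
 * 65 Mbit/s * 4 ms / 8 is on the order of 32 KB, which is why the
 * [MCS_HT20] row lists 32172 at index 7.  ath_lookup_rate() below uses
 * these values to bound the A-MPDU size.
 */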

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}
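
/*
 * Editorial note: ath_frame_info is the driver's per-frame state and is
 * stashed inside the mac80211 tx control block of the skb
 * (tx_info->rate_driver_data); the BUILD_BUG_ON above guards against it
 * ever outgrowing that scratch area.
 */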

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
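
/*
 * Worked example for the block-ack window bookkeeping above (illustrative
 * numbers only): with tid->seq_start = 100 and a subframe sequence number
 * of 103, ATH_BA_INDEX() yields an offset of 3 from the start of the
 * window; adding tid->baw_head and masking with (ATH_TID_MAX_BUFS - 1)
 * turns that offset into a slot in the circular tx_buf bitmap.  When the
 * slot for seq_start itself is cleared again, ath_tx_update_baw() slides
 * seq_start (and baw_head) forward until it reaches the next outstanding
 * subframe.
 */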

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}
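
/*
 * Illustrative example for ath_tx_count_frames() above (numbers not from
 * the original source): for an aggregate whose block-ack starts at
 * seq_st = 100, a subframe with sequence number 105 maps to bit 5 of the
 * block-ack bitmap copied from ts->ba_low; that subframe counts as bad
 * when the bit is clear, or when the whole transmission failed.
 */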

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					fi->bf = tbf;
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
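
/*
 * Editorial note: in the loop above, each subframe of the aggregate is
 * either completed (acked by the block-ack, or failed outright) or moved
 * to the temporary bf_pending queue for a software retry; the pending
 * frames are then spliced back onto the front of the TID queue so that
 * retransmissions keep their original ordering within the block-ack
 * window.
 */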

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
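
/*
 * Illustrative example (numbers not from the original source): if the
 * slowest rate in the series is MCS 7 / HT20 / long GI, the table lookup
 * above gives max_4ms_framelen = 32172 bytes.  Without Bluetooth
 * coexistence the aggregate is limited to min(32172, ATH_AMPDU_LIMIT_MAX);
 * when BT priority is detected the limit is scaled to 3/8 of that,
 * i.e. 32172 * 3 / 8 = 12064 bytes, before being clamped by the peer's
 * advertised maximum A-MPDU length.
 */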

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
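
/*
 * Worked example for the minimum-length calculation above (illustrative
 * numbers only): with an mpdudensity of 8 us, MCS 7 (one stream) at
 * 20 MHz and long GI, NUM_SYMBOLS_PER_USEC(8) = 2 symbols and
 * bits_per_symbol[7][0] = 260, so minlen = (2 * 260) / 8 = 65 bytes.
 * A 40-byte subframe would then need roughly (65 - 40) / 4 = 6 extra
 * delimiters (assuming the usual 4-byte ATH_AGGR_DELIM_SZ) on top of the
 * length-based default.
 */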

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
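
/*
 * Worked example for ath_pkt_duration() above (illustrative numbers only):
 * a 1500-byte MPDU at MCS 7 (one stream), 20 MHz, long GI gives
 * nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so 47 OFDM symbols;
 * SYMBOL_TIME(47) = 188 us, plus 36 us for the L-STF/L-LTF/L-SIG/HT-SIG/
 * HT-STF/HT-LTF training and signal fields, for roughly 224 us on air.
 */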

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, series[i].Rate);
			series[i].PktDuration = ath_pkt_duration(sc, rix, len,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = ah->txchainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, series[i].Rate);

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
				phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;

	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
	bool clrdmask = !!(tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT);

	u32 ds_next;

	ath_buf_set_rate(sc, bf, len);

	while (bf) {
		if (bf->bf_next)
			ds_next = bf->bf_next->bf_daddr;
		else
			ds_next = 0;

		ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, clrdmask);
		if (!aggr)
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
		else if (!bf->bf_next)
			ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_desc);
		else {
			if (bf == bf_first)
				ath9k_hw_set11n_aggr_first(sc->sc_ah,
					bf->bf_desc, len);

			ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc,
					bf->bf_state.ndelim);
		}

		ath9k_hw_set_desc_link(ah, bf->bf_desc, ds_next);
		bf = bf->bf_next;
	}
}
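
/*
 * Editorial note: for an aggregate, the loop above tags the first
 * descriptor with the total aggregate length, intermediate descriptors
 * with their delimiter counts, and the final descriptor as "aggr last";
 * for a single frame the aggregation bits are cleared instead.  Each
 * descriptor is then linked to the DMA address of the next buffer in the
 * chain.
 */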
1025
Sujithe8324352009-01-16 21:38:42 +05301026static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1027 struct ath_atx_tid *tid)
1028{
Sujithd43f30152009-01-16 21:38:53 +05301029 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301030 enum ATH_AGGR_STATUS status;
Felix Fietkau399c6482011-09-14 21:24:17 +02001031 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +05301032 struct list_head bf_q;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001033 int aggr_len;
Sujithe8324352009-01-16 21:38:42 +05301034
1035 do {
Felix Fietkau56dc6332011-08-28 00:32:22 +02001036 if (skb_queue_empty(&tid->buf_q))
Sujithe8324352009-01-16 21:38:42 +05301037 return;
1038
1039 INIT_LIST_HEAD(&bf_q);
1040
Felix Fietkau269c44b2010-11-14 15:20:06 +01001041 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
Sujithe8324352009-01-16 21:38:42 +05301042
1043 /*
Sujithd43f30152009-01-16 21:38:53 +05301044 * no frames picked up to be aggregated;
1045 * block-ack window is not open.
Sujithe8324352009-01-16 21:38:42 +05301046 */
1047 if (list_empty(&bf_q))
1048 break;
1049
1050 bf = list_first_entry(&bf_q, struct ath_buf, list);
Sujithd43f30152009-01-16 21:38:53 +05301051 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
Felix Fietkau399c6482011-09-14 21:24:17 +02001052 tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
Sujithe8324352009-01-16 21:38:42 +05301053
Felix Fietkau55195412011-04-17 23:28:09 +02001054 if (tid->ac->clear_ps_filter) {
1055 tid->ac->clear_ps_filter = false;
Felix Fietkau399c6482011-09-14 21:24:17 +02001056 tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1057 } else {
1058 tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
Felix Fietkau55195412011-04-17 23:28:09 +02001059 }
1060
Sujithd43f30152009-01-16 21:38:53 +05301061 /* if only one frame, send as non-aggregate */
Felix Fietkaub572d032010-11-14 15:20:07 +01001062 if (bf == bf->bf_lastbf) {
Felix Fietkau399c6482011-09-14 21:24:17 +02001063 aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
1064 bf->bf_state.bf_type = BUF_AMPDU;
1065 } else {
1066 TX_STAT_INC(txq->axq_qnum, a_aggr);
Sujithe8324352009-01-16 21:38:42 +05301067 }
1068
Felix Fietkau399c6482011-09-14 21:24:17 +02001069 ath_tx_fill_desc(sc, bf, aggr_len);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001070 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001071 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
Sujithe8324352009-01-16 21:38:42 +05301072 status != ATH_AGGR_BAW_CLOSED);
1073}
1074
Felix Fietkau231c3a12010-09-20 19:35:28 +02001075int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1076 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +05301077{
1078 struct ath_atx_tid *txtid;
1079 struct ath_node *an;
1080
1081 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +05301082 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +02001083
1084 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1085 return -EAGAIN;
1086
Sujithf83da962009-07-23 15:32:37 +05301087 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001088 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -07001089 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +02001090
Felix Fietkau2ed72222011-01-10 17:05:49 -07001091 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1092 txtid->baw_head = txtid->baw_tail = 0;
1093
Felix Fietkau231c3a12010-09-20 19:35:28 +02001094 return 0;
Sujithe8324352009-01-16 21:38:42 +05301095}
1096
Sujithf83da962009-07-23 15:32:37 +05301097void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Sujithe8324352009-01-16 21:38:42 +05301098{
1099 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1100 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau066dae92010-11-07 14:59:39 +01001101 struct ath_txq *txq = txtid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +05301102
1103 if (txtid->state & AGGR_CLEANUP)
Sujithf83da962009-07-23 15:32:37 +05301104 return;
Sujithe8324352009-01-16 21:38:42 +05301105
1106 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
Vasanthakumar Thiagarajan5eae6592009-06-09 15:28:21 +05301107 txtid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithf83da962009-07-23 15:32:37 +05301108 return;
Sujithe8324352009-01-16 21:38:42 +05301109 }
1110
Sujithe8324352009-01-16 21:38:42 +05301111 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +02001112 txtid->paused = true;
Felix Fietkau90fa5392010-09-20 13:45:38 +02001113
1114 /*
1115 * If frames are still being transmitted for this TID, they will be
1116 * cleaned up during tx completion. To prevent race conditions, this
1117 * TID can only be reused after all in-progress subframes have been
1118 * completed.
1119 */
1120 if (txtid->baw_head != txtid->baw_tail)
1121 txtid->state |= AGGR_CLEANUP;
1122 else
1123 txtid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithd43f30152009-01-16 21:38:53 +05301124 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301125
Felix Fietkau90fa5392010-09-20 13:45:38 +02001126 ath_tx_flush_tid(sc, txtid);
Sujithe8324352009-01-16 21:38:42 +05301127}
1128
Felix Fietkau55195412011-04-17 23:28:09 +02001129bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
1130{
1131 struct ath_atx_tid *tid;
1132 struct ath_atx_ac *ac;
1133 struct ath_txq *txq;
1134 bool buffered = false;
1135 int tidno;
1136
1137 for (tidno = 0, tid = &an->tid[tidno];
1138 tidno < WME_NUM_TID; tidno++, tid++) {
1139
1140 if (!tid->sched)
1141 continue;
1142
1143 ac = tid->ac;
1144 txq = ac->txq;
1145
1146 spin_lock_bh(&txq->axq_lock);
1147
Felix Fietkau56dc6332011-08-28 00:32:22 +02001148 if (!skb_queue_empty(&tid->buf_q))
Felix Fietkau55195412011-04-17 23:28:09 +02001149 buffered = true;
1150
1151 tid->sched = false;
1152 list_del(&tid->list);
1153
1154 if (ac->sched) {
1155 ac->sched = false;
1156 list_del(&ac->list);
1157 }
1158
1159 spin_unlock_bh(&txq->axq_lock);
1160 }
1161
1162 return buffered;
1163}
1164
1165void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1166{
1167 struct ath_atx_tid *tid;
1168 struct ath_atx_ac *ac;
1169 struct ath_txq *txq;
1170 int tidno;
1171
1172 for (tidno = 0, tid = &an->tid[tidno];
1173 tidno < WME_NUM_TID; tidno++, tid++) {
1174
1175 ac = tid->ac;
1176 txq = ac->txq;
1177
1178 spin_lock_bh(&txq->axq_lock);
1179 ac->clear_ps_filter = true;
1180
Felix Fietkau56dc6332011-08-28 00:32:22 +02001181 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
Felix Fietkau55195412011-04-17 23:28:09 +02001182 ath_tx_queue_tid(txq, tid);
1183 ath_txq_schedule(sc, txq);
1184 }
1185
1186 spin_unlock_bh(&txq->axq_lock);
1187 }
1188}
1189
Sujithe8324352009-01-16 21:38:42 +05301190void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1191{
1192 struct ath_atx_tid *txtid;
1193 struct ath_node *an;
1194
1195 an = (struct ath_node *)sta->drv_priv;
1196
1197 if (sc->sc_flags & SC_OP_TXAGGR) {
1198 txtid = ATH_AN_2_TID(an, tid);
1199 txtid->baw_size =
1200 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1201 txtid->state |= AGGR_ADDBA_COMPLETE;
1202 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1203 ath_tx_resume_tid(sc, txtid);
1204 }
1205}
1206
Sujithe8324352009-01-16 21:38:42 +05301207/********************/
1208/* Queue Management */
1209/********************/
1210
Sujithe8324352009-01-16 21:38:42 +05301211static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1212 struct ath_txq *txq)
1213{
1214 struct ath_atx_ac *ac, *ac_tmp;
1215 struct ath_atx_tid *tid, *tid_tmp;
1216
1217 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1218 list_del(&ac->list);
1219 ac->sched = false;
1220 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1221 list_del(&tid->list);
1222 tid->sched = false;
1223 ath_tid_drain(sc, txq, tid);
1224 }
1225 }
1226}
1227
1228struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1229{
Sujithcbe61d82009-02-09 13:27:12 +05301230 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001231 struct ath_common *common = ath9k_hw_common(ah);
Sujithe8324352009-01-16 21:38:42 +05301232 struct ath9k_tx_queue_info qi;
Felix Fietkau066dae92010-11-07 14:59:39 +01001233 static const int subtype_txq_to_hwq[] = {
1234 [WME_AC_BE] = ATH_TXQ_AC_BE,
1235 [WME_AC_BK] = ATH_TXQ_AC_BK,
1236 [WME_AC_VI] = ATH_TXQ_AC_VI,
1237 [WME_AC_VO] = ATH_TXQ_AC_VO,
1238 };
Ben Greear60f2d1d2011-01-09 23:11:52 -08001239 int axq_qnum, i;
Sujithe8324352009-01-16 21:38:42 +05301240
1241 memset(&qi, 0, sizeof(qi));
Felix Fietkau066dae92010-11-07 14:59:39 +01001242 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
Sujithe8324352009-01-16 21:38:42 +05301243 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1244 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1245 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1246 qi.tqi_physCompBuf = 0;
1247
1248 /*
1249 * Enable interrupts only for EOL and DESC conditions.
1250 * We mark tx descriptors to receive a DESC interrupt
1251 * when a tx queue gets deep; otherwise waiting for the
1252 * EOL to reap descriptors. Note that this is done to
1253 * reduce interrupt load and this only defers reaping
1254 * descriptors, never transmitting frames. Aside from
1255 * reducing interrupts this also permits more concurrency.
1256 * The only potential downside is if the tx queue backs
1257 * up in which case the top half of the kernel may backup
1258 * due to a lack of tx descriptors.
1259 *
1260 * The UAPSD queue is an exception, since we take a desc-
1261 * based intr on the EOSP frames.
1262 */
Vasanthakumar Thiagarajanafe754d2010-04-15 17:39:40 -04001263 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1264 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
1265 TXQ_FLAG_TXERRINT_ENABLE;
1266 } else {
1267 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1268 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1269 else
1270 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1271 TXQ_FLAG_TXDESCINT_ENABLE;
1272 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001273 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1274 if (axq_qnum == -1) {
Sujithe8324352009-01-16 21:38:42 +05301275 /*
1276 * NB: don't print a message, this happens
1277 * normally on parts with too few tx queues
1278 */
1279 return NULL;
1280 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001281 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
Joe Perches38002762010-12-02 19:12:36 -08001282 ath_err(common, "qnum %u out of range, max %zu!\n",
Ben Greear60f2d1d2011-01-09 23:11:52 -08001283 axq_qnum, ARRAY_SIZE(sc->tx.txq));
1284 ath9k_hw_releasetxqueue(ah, axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301285 return NULL;
1286 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001287 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
1288 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301289
Ben Greear60f2d1d2011-01-09 23:11:52 -08001290 txq->axq_qnum = axq_qnum;
1291 txq->mac80211_qnum = -1;
Sujithe8324352009-01-16 21:38:42 +05301292 txq->axq_link = NULL;
1293 INIT_LIST_HEAD(&txq->axq_q);
1294 INIT_LIST_HEAD(&txq->axq_acq);
1295 spin_lock_init(&txq->axq_lock);
1296 txq->axq_depth = 0;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001297 txq->axq_ampdu_depth = 0;
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001298 txq->axq_tx_inprogress = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08001299 sc->tx.txqsetup |= 1<<axq_qnum;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001300
1301 txq->txq_headidx = txq->txq_tailidx = 0;
1302 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1303 INIT_LIST_HEAD(&txq->txq_fifo[i]);
Sujithe8324352009-01-16 21:38:42 +05301304 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08001305 return &sc->tx.txq[axq_qnum];
Sujithe8324352009-01-16 21:38:42 +05301306}
1307
Sujithe8324352009-01-16 21:38:42 +05301308int ath_txq_update(struct ath_softc *sc, int qnum,
1309 struct ath9k_tx_queue_info *qinfo)
1310{
Sujithcbe61d82009-02-09 13:27:12 +05301311 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301312 int error = 0;
1313 struct ath9k_tx_queue_info qi;
1314
1315 if (qnum == sc->beacon.beaconq) {
1316 /*
1317 * XXX: for beacon queue, we just save the parameter.
1318 * It will be picked up by ath_beaconq_config when
1319 * it's necessary.
1320 */
1321 sc->beacon.beacon_qi = *qinfo;
1322 return 0;
1323 }
1324
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001325 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301326
1327 ath9k_hw_get_txq_props(ah, qnum, &qi);
1328 qi.tqi_aifs = qinfo->tqi_aifs;
1329 qi.tqi_cwmin = qinfo->tqi_cwmin;
1330 qi.tqi_cwmax = qinfo->tqi_cwmax;
1331 qi.tqi_burstTime = qinfo->tqi_burstTime;
1332 qi.tqi_readyTime = qinfo->tqi_readyTime;
1333
1334 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001335 ath_err(ath9k_hw_common(sc->sc_ah),
1336 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301337 error = -EIO;
1338 } else {
1339 ath9k_hw_resettxqueue(ah, qnum);
1340 }
1341
1342 return error;
1343}
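/*
 * Illustrative sketch (not part of the driver): ath_txq_update() is the
 * path by which per-AC WMM parameters (AIFS, CWmin, CWmax, TXOP) reach a
 * hardware queue.  The struct below is a local stand-in, not the real
 * ath9k_tx_queue_info, and the numbers are typical voice-AC defaults,
 * given purely as an example of what ends up in tqi_aifs/tqi_cwmin/tqi_cwmax.
 */
#if 0	/* standalone illustration, never compiled into the driver */
#include <stdio.h>

/* Local stand-in mirroring the fields copied in ath_txq_update(). */
struct example_txq_params {
	unsigned int aifs;
	unsigned int cwmin;
	unsigned int cwmax;
	unsigned int burst_time;	/* TXOP limit, assumed microseconds */
	unsigned int ready_time;
};

int main(void)
{
	/* Assumed WMM voice-AC defaults, for illustration only. */
	struct example_txq_params vo = {
		.aifs		= 2,
		.cwmin		= 3,
		.cwmax		= 7,
		.burst_time	= 1504,
		.ready_time	= 0,
	};

	printf("VO: aifs=%u cwmin=%u cwmax=%u txop=%uus\n",
	       vo.aifs, vo.cwmin, vo.cwmax, vo.burst_time);
	return 0;
}
#endif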
1344
1345int ath_cabq_update(struct ath_softc *sc)
1346{
1347 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001348 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301349 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301350
1351 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1352 /*
1353 * Ensure the readytime % is within the bounds.
1354 */
Sujith17d79042009-02-09 13:27:03 +05301355 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1356 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1357 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1358 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301359
Steve Brown9814f6b2011-02-07 17:10:39 -07001360 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301361 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301362 ath_txq_update(sc, qnum, &qi);
1363
1364 return 0;
1365}
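/*
 * Illustrative sketch (not part of the driver): the CAB ready-time is a
 * percentage of the beacon interval, clamped to the LO/HI bounds checked
 * in ath_cabq_update() above.  The bound values and the numbers below are
 * assumptions picked for the illustration; the real constants live in the
 * driver headers.
 */
#if 0	/* standalone illustration, never compiled into the driver */
#include <stdio.h>

#define EXAMPLE_READY_TIME_LO_BOUND	50	/* assumed, percent */
#define EXAMPLE_READY_TIME_HI_BOUND	80	/* assumed, percent */

static unsigned int example_cabq_ready_time(unsigned int beacon_interval,
					    unsigned int ready_pct)
{
	/* Clamp the percentage, as ath_cabq_update() does. */
	if (ready_pct < EXAMPLE_READY_TIME_LO_BOUND)
		ready_pct = EXAMPLE_READY_TIME_LO_BOUND;
	else if (ready_pct > EXAMPLE_READY_TIME_HI_BOUND)
		ready_pct = EXAMPLE_READY_TIME_HI_BOUND;

	/* Result is in the same units as beacon_interval (TU here). */
	return (beacon_interval * ready_pct) / 100;
}

int main(void)
{
	/* 100 TU beacon interval at 80% -> 80 TU of CAB ready time. */
	printf("%u\n", example_cabq_ready_time(100, 80));
	return 0;
}
#endif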
1366
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001367static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1368{
1369 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1370 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1371}
1372
Felix Fietkaufce041b2011-05-19 12:20:25 +02001373static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1374 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301375 __releases(txq->axq_lock)
1376 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301377{
1378 struct ath_buf *bf, *lastbf;
1379 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001380 struct ath_tx_status ts;
1381
1382 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301383 INIT_LIST_HEAD(&bf_head);
1384
Felix Fietkaufce041b2011-05-19 12:20:25 +02001385 while (!list_empty(list)) {
1386 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301387
Felix Fietkaufce041b2011-05-19 12:20:25 +02001388 if (bf->bf_stale) {
1389 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301390
Felix Fietkaufce041b2011-05-19 12:20:25 +02001391 ath_tx_return_buffer(sc, bf);
1392 continue;
Sujithe8324352009-01-16 21:38:42 +05301393 }
1394
1395 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001396 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001397
Sujithe8324352009-01-16 21:38:42 +05301398 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001399 if (bf_is_ampdu_not_probing(bf))
1400 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301401
Felix Fietkaufce041b2011-05-19 12:20:25 +02001402 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301403 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001404 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1405 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301406 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001407 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001408 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001409 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001410}
1411
1412/*
1413 * Drain a given TX queue (could be Beacon or Data)
1414 *
1415 * This assumes output has been stopped and
1416 * we do not need to block ath_tx_tasklet.
1417 */
1418void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1419{
1420 spin_lock_bh(&txq->axq_lock);
1421 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1422 int idx = txq->txq_tailidx;
1423
1424 while (!list_empty(&txq->txq_fifo[idx])) {
1425 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1426 retry_tx);
1427
1428 INCR(idx, ATH_TXFIFO_DEPTH);
1429 }
1430 txq->txq_tailidx = idx;
1431 }
1432
1433 txq->axq_link = NULL;
1434 txq->axq_tx_inprogress = false;
1435 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001436
1437 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001438 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1439 ath_txq_drain_pending_buffers(sc, txq);
1440
1441 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301442}
1443
Felix Fietkau080e1a22010-12-05 20:17:53 +01001444bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301445{
Sujithcbe61d82009-02-09 13:27:12 +05301446 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001447 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301448 struct ath_txq *txq;
1449 int i, npend = 0;
1450
1451 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001452 return true;
Sujith043a0402009-01-16 21:38:47 +05301453
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001454 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301455
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001456 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301457 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001458 if (!ATH_TXQ_SETUP(sc, i))
1459 continue;
1460
1461 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301462 }
1463
Felix Fietkau080e1a22010-12-05 20:17:53 +01001464 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001465 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301466
1467 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001468 if (!ATH_TXQ_SETUP(sc, i))
1469 continue;
1470
1471 /*
1472 * The caller will resume queues with ieee80211_wake_queues.
1473 * Mark the queue as not stopped to prevent ath_tx_complete
1474 * from waking the queue too early.
1475 */
1476 txq = &sc->tx.txq[i];
1477 txq->stopped = false;
1478 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301479 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001480
1481 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301482}
1483
Sujithe8324352009-01-16 21:38:42 +05301484void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1485{
1486 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1487 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1488}
1489
Ben Greear7755bad2011-01-18 17:30:00 -08001490/* For each axq_acq entry, for each tid, try to schedule packets
1491 * for transmit until ampdu_depth has reached min Q depth.
1492 */
Sujithe8324352009-01-16 21:38:42 +05301493void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1494{
Ben Greear7755bad2011-01-18 17:30:00 -08001495 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1496 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301497
Felix Fietkau236de512011-09-03 01:40:25 +02001498 if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
Felix Fietkau21f28e62011-01-15 14:30:14 +01001499 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301500 return;
1501
1502 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001503 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301504
Ben Greear7755bad2011-01-18 17:30:00 -08001505 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1506 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1507 list_del(&ac->list);
1508 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301509
Ben Greear7755bad2011-01-18 17:30:00 -08001510 while (!list_empty(&ac->tid_q)) {
1511 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1512 list);
1513 list_del(&tid->list);
1514 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301515
Ben Greear7755bad2011-01-18 17:30:00 -08001516 if (tid->paused)
1517 continue;
Sujithe8324352009-01-16 21:38:42 +05301518
Ben Greear7755bad2011-01-18 17:30:00 -08001519 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301520
Ben Greear7755bad2011-01-18 17:30:00 -08001521 /*
1522 * add tid to round-robin queue if more frames
1523 * are pending for the tid
1524 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001525 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001526 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301527
Ben Greear7755bad2011-01-18 17:30:00 -08001528 if (tid == last_tid ||
1529 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1530 break;
Sujithe8324352009-01-16 21:38:42 +05301531 }
Ben Greear7755bad2011-01-18 17:30:00 -08001532
1533 if (!list_empty(&ac->tid_q)) {
1534 if (!ac->sched) {
1535 ac->sched = true;
1536 list_add_tail(&ac->list, &txq->axq_acq);
1537 }
1538 }
1539
1540 if (ac == last_ac ||
1541 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1542 return;
Sujithe8324352009-01-16 21:38:42 +05301543 }
1544}
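/*
 * Illustrative sketch (not part of the driver): a toy, single-level model
 * of the rotation in ath_txq_schedule() above.  The real code walks ACs
 * and then TIDs, sends a whole aggregate per TID and re-queues busy
 * TIDs/ACs at the tail; here we just sweep a few backlogged "TIDs" once
 * and stop as soon as the hardware queue is deep enough.  All names and
 * numbers are made up for the example.
 */
#if 0	/* standalone illustration, never compiled into the driver */
#include <stdio.h>

#define EXAMPLE_MIN_QDEPTH	2	/* stand-in for ATH_AGGR_MIN_QDEPTH */

int main(void)
{
	unsigned int backlog[3] = { 3, 0, 2 };	/* pending frames per TID */
	unsigned int hwq_depth = 0;
	unsigned int tid;

	for (tid = 0; tid < 3 && hwq_depth < EXAMPLE_MIN_QDEPTH; tid++) {
		if (!backlog[tid])
			continue;	/* nothing pending, skip this TID */

		/* "Transmit" one frame from this TID. */
		backlog[tid]--;
		hwq_depth++;
		printf("tid %u -> hw queue (depth now %u)\n", tid, hwq_depth);
	}
	return 0;
}
#endif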
1545
Sujithe8324352009-01-16 21:38:42 +05301546/***********/
1547/* TX, DMA */
1548/***********/
1549
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001550/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001551 * Insert a chain of ath_buf (descriptors) on a txq and
 1552 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001553 */
Sujith102e0572008-10-29 10:15:16 +05301554static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001555 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001556{
Sujithcbe61d82009-02-09 13:27:12 +05301557 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001558 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001559 struct ath_buf *bf, *bf_last;
1560 bool puttxbuf = false;
1561 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301562
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001563 /*
1564 * Insert the frame on the outbound list and
1565 * pass it on to the hardware.
1566 */
1567
1568 if (list_empty(head))
1569 return;
1570
Felix Fietkaufce041b2011-05-19 12:20:25 +02001571 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001572 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001573 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001574
Joe Perches226afe62010-12-02 19:12:37 -08001575 ath_dbg(common, ATH_DBG_QUEUE,
1576 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001577
Felix Fietkaufce041b2011-05-19 12:20:25 +02001578 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1579 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001580 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001581 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001582 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001583 list_splice_tail_init(head, &txq->axq_q);
1584
Felix Fietkaufce041b2011-05-19 12:20:25 +02001585 if (txq->axq_link) {
1586 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001587 ath_dbg(common, ATH_DBG_XMIT,
1588 "link[%u] (%p)=%llx (%p)\n",
1589 txq->axq_qnum, txq->axq_link,
1590 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001591 } else if (!edma)
1592 puttxbuf = true;
1593
1594 txq->axq_link = bf_last->bf_desc;
1595 }
1596
1597 if (puttxbuf) {
1598 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1599 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1600 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1601 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1602 }
1603
1604 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001605 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001606 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001607 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001608
1609 if (!internal) {
1610 txq->axq_depth++;
1611 if (bf_is_ampdu_not_probing(bf))
1612 txq->axq_ampdu_depth++;
1613 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001614}
1615
Sujithe8324352009-01-16 21:38:42 +05301616static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001617 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301618{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001619 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001620 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001621 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301622
1623 /*
1624 * Do not queue to h/w when any of the following conditions is true:
1625 * - there are pending frames in software queue
1626 * - the TID is currently paused for ADDBA/BAR request
1627 * - seqno is not within block-ack window
1628 * - h/w queue depth exceeds low water mark
1629 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001630 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001631 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001632 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001633 /*
Sujithe8324352009-01-16 21:38:42 +05301634 * Add this frame to software queue for scheduling later
1635 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001636 */
Ben Greearbda8add2011-01-09 23:11:48 -08001637 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001638 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001639 if (!txctl->an || !txctl->an->sleeping)
1640 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301641 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001642 }
1643
Felix Fietkau44f1d262011-08-28 00:32:25 +02001644 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1645 if (!bf)
1646 return;
1647
Felix Fietkau399c6482011-09-14 21:24:17 +02001648 bf->bf_state.bf_type = BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001649 INIT_LIST_HEAD(&bf_head);
1650 list_add(&bf->list, &bf_head);
1651
Sujithe8324352009-01-16 21:38:42 +05301652 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001653 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301654
1655 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001656 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301657 bf->bf_lastbf = bf;
Felix Fietkau399c6482011-09-14 21:24:17 +02001658 ath_tx_fill_desc(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001659 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301660}
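/*
 * Illustrative sketch (not part of the driver): the BAW_WITHIN() check in
 * ath_tx_send_ampdu() above decides whether a sequence number still falls
 * inside the block-ack window that starts at seq_start.  The arithmetic
 * below is an assumed re-implementation for illustration only -- 802.11
 * sequence numbers are 12 bits wide, so the distance from the window start
 * is taken modulo 4096 to handle wrap-around.
 */
#if 0	/* standalone illustration, never compiled into the driver */
#include <stdbool.h>
#include <stdio.h>

static bool example_baw_within(unsigned int seq_start, unsigned int baw_size,
			       unsigned int seqno)
{
	/* Distance from the window start, modulo the 12-bit sequence space. */
	return ((seqno - seq_start) & 4095) < baw_size;
}

int main(void)
{
	/* Window of 64 frames starting at 4090: 4090..4095 and 0..57 fit. */
	printf("%d\n", example_baw_within(4090, 64, 4095));	/* 1 */
	printf("%d\n", example_baw_within(4090, 64, 10));	/* 1 (wrapped) */
	printf("%d\n", example_baw_within(4090, 64, 100));	/* 0 */
	return 0;
}
#endif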
1661
Felix Fietkau82b873a2010-11-11 03:18:37 +01001662static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001663 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001664{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001665 struct ath_frame_info *fi = get_frame_info(skb);
1666 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301667 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001668
Felix Fietkau44f1d262011-08-28 00:32:25 +02001669 bf = fi->bf;
1670 if (!bf)
1671 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1672
1673 if (!bf)
1674 return;
1675
1676 INIT_LIST_HEAD(&bf_head);
1677 list_add_tail(&bf->list, &bf_head);
Felix Fietkau399c6482011-09-14 21:24:17 +02001678 bf->bf_state.bf_type = 0;
Sujithe8324352009-01-16 21:38:42 +05301679
1680 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001681 if (tid)
1682 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301683
Sujithd43f30152009-01-16 21:38:53 +05301684 bf->bf_lastbf = bf;
Felix Fietkau399c6482011-09-14 21:24:17 +02001685 ath_tx_fill_desc(sc, bf, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001686 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301687 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001688}
1689
Sujith528f0c62008-10-29 10:14:26 +05301690static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001691{
Sujith528f0c62008-10-29 10:14:26 +05301692 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001693 enum ath9k_pkt_type htype;
1694 __le16 fc;
1695
Sujith528f0c62008-10-29 10:14:26 +05301696 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001697 fc = hdr->frame_control;
1698
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001699 if (ieee80211_is_beacon(fc))
1700 htype = ATH9K_PKT_TYPE_BEACON;
1701 else if (ieee80211_is_probe_resp(fc))
1702 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1703 else if (ieee80211_is_atim(fc))
1704 htype = ATH9K_PKT_TYPE_ATIM;
1705 else if (ieee80211_is_pspoll(fc))
1706 htype = ATH9K_PKT_TYPE_PSPOLL;
1707 else
1708 htype = ATH9K_PKT_TYPE_NORMAL;
1709
1710 return htype;
1711}
1712
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001713static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1714 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301715{
1716 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001717 struct ieee80211_sta *sta = tx_info->control.sta;
1718 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001719 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001720 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001721 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001722 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301723
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001724 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301725
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001726 if (sta)
1727 an = (struct ath_node *) sta->drv_priv;
1728
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001729 memset(fi, 0, sizeof(*fi));
1730 if (hw_key)
1731 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001732 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1733 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001734 else
1735 fi->keyix = ATH9K_TXKEYIX_INVALID;
1736 fi->keytype = keytype;
1737 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301738}
1739
Felix Fietkau82b873a2010-11-11 03:18:37 +01001740static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301741{
1742 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1743 int flags = 0;
1744
Sujith528f0c62008-10-29 10:14:26 +05301745 flags |= ATH9K_TXDESC_INTREQ;
1746
1747 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1748 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301749
Felix Fietkau82b873a2010-11-11 03:18:37 +01001750 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001751 flags |= ATH9K_TXDESC_LDPC;
1752
Sujith528f0c62008-10-29 10:14:26 +05301753 return flags;
1754}
1755
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301756u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1757{
1758 struct ath_hw *ah = sc->sc_ah;
1759 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301760 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1761 (curchan->channelFlags & CHANNEL_5GHZ) &&
1762 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301763 return 0x3;
1764 else
1765 return chainmask;
1766}
1767
Felix Fietkau44f1d262011-08-28 00:32:25 +02001768/*
 1769 * Assign a descriptor (and a sequence number, if necessary)
 1770 * and map the buffer for DMA. Frees the skb on error.
1771 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001772static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001773 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001774 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001775 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301776{
Felix Fietkau04caf862010-11-14 15:20:12 +01001777 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001778 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001779 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001780 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001781 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001782 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001783 int frm_type;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001784 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001785
1786 bf = ath_tx_get_buffer(sc);
1787 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001788 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001789 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001790 }
Sujithe8324352009-01-16 21:38:42 +05301791
Sujithe8324352009-01-16 21:38:42 +05301792 ATH_TXBUF_RESET(bf);
1793
Felix Fietkaufa05f872011-08-28 00:32:24 +02001794 if (tid) {
1795 seqno = tid->seq_next;
1796 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1797 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1798 bf->bf_state.seqno = seqno;
1799 }
1800
Felix Fietkau82b873a2010-11-11 03:18:37 +01001801 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301802 bf->bf_mpdu = skb;
1803
Ben Greearc1739eb32010-10-14 12:45:29 -07001804 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1805 skb->len, DMA_TO_DEVICE);
1806 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301807 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001808 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001809 ath_err(ath9k_hw_common(sc->sc_ah),
1810 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001811 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001812 goto error;
Sujithe8324352009-01-16 21:38:42 +05301813 }
1814
Sujithe8324352009-01-16 21:38:42 +05301815 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301816
1817 ds = bf->bf_desc;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001818 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1819 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301820
1821 ath9k_hw_filltxdesc(ah, ds,
1822 skb->len, /* segment length */
1823 true, /* first segment */
1824 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001825 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001826 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001827 txq->axq_qnum);
1828
Felix Fietkau56dc6332011-08-28 00:32:22 +02001829 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001830
1831 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001832
1833error:
1834 dev_kfree_skb_any(skb);
1835 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001836}
1837
1838/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001839static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001840 struct ath_tx_control *txctl)
1841{
Felix Fietkau04caf862010-11-14 15:20:12 +01001842 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1843 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001844 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001845 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001846 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301847
Sujithe8324352009-01-16 21:38:42 +05301848 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301849 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1850 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001851 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1852 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001853 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001854
Felix Fietkau066dae92010-11-07 14:59:39 +01001855 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001856 }
1857
1858 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001859 /*
1860 * Try aggregation if it's a unicast data frame
1861 * and the destination is HT capable.
1862 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001863 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301864 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001865 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1866 if (!bf)
1867 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001868
Felix Fietkau82b873a2010-11-11 03:18:37 +01001869 bf->bf_state.bfs_paprd = txctl->paprd;
1870
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001871 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001872 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1873 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001874
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301875 if (txctl->paprd)
1876 bf->bf_state.bfs_paprd_timestamp = jiffies;
1877
Felix Fietkau44f1d262011-08-28 00:32:25 +02001878 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301879 }
1880
Felix Fietkaufa05f872011-08-28 00:32:24 +02001881out:
Sujithe8324352009-01-16 21:38:42 +05301882 spin_unlock_bh(&txctl->txq->axq_lock);
1883}
1884
1885/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001886int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301887 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001888{
Felix Fietkau28d16702010-11-14 15:20:10 +01001889 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1890 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001891 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001892 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001893 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001894 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001895 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001896 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001897 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001898
Ben Greeara9927ba2010-12-06 21:13:49 -08001899 /* NOTE: sta can be NULL according to net/mac80211.h */
1900 if (sta)
1901 txctl->an = (struct ath_node *)sta->drv_priv;
1902
Felix Fietkau04caf862010-11-14 15:20:12 +01001903 if (info->control.hw_key)
1904 frmlen += info->control.hw_key->icv_len;
1905
Felix Fietkau28d16702010-11-14 15:20:10 +01001906 /*
1907 * As a temporary workaround, assign seq# here; this will likely need
1908 * to be cleaned up to work better with Beacon transmission and virtual
1909 * BSSes.
1910 */
1911 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1912 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1913 sc->tx.seq_no += 0x10;
1914 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1915 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1916 }
1917
1918 /* Add the padding after the header if this is not already done */
1919 padpos = ath9k_cmn_padpos(hdr->frame_control);
1920 padsize = padpos & 3;
1921 if (padsize && skb->len > padpos) {
1922 if (skb_headroom(skb) < padsize)
1923 return -ENOMEM;
1924
1925 skb_push(skb, padsize);
1926 memmove(skb->data, skb->data + padsize, padpos);
1927 }
1928
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001929 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1930 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1931 !ieee80211_is_data(hdr->frame_control))
1932 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1933
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001934 setup_frame_info(hw, skb, frmlen);
1935
1936 /*
1937 * At this point, the vif, hw_key and sta pointers in the tx control
 1938 * info are no longer valid (overwritten by the ath_frame_info data).
1939 */
1940
Felix Fietkau066dae92010-11-07 14:59:39 +01001941 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001942 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001943 if (txq == sc->tx.txq_map[q] &&
1944 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001945 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001946 txq->stopped = 1;
1947 }
1948 spin_unlock_bh(&txq->axq_lock);
1949
Felix Fietkau44f1d262011-08-28 00:32:25 +02001950 ath_tx_start_dma(sc, skb, txctl);
1951 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001952}
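/*
 * Illustrative sketch (not part of the driver): two bits of arithmetic used
 * in ath_tx_start() above.  The 802.11 Sequence Control field keeps the
 * fragment number in bits 0-3 and the sequence number in bits 4-15, which
 * is why sc->tx.seq_no is bumped in steps of 0x10.  The hardware also wants
 * the frame body 4-byte aligned, so a header whose length is not a multiple
 * of 4 (e.g. a 26-byte QoS data header) gets padsize = padpos & 3 bytes of
 * padding inserted between header and body (the driver pushes the skb head
 * and moves the header up).  The numbers below are assumptions chosen for
 * the illustration.
 */
#if 0	/* standalone illustration, never compiled into the driver */
#include <stdio.h>

int main(void)
{
	unsigned short seq_no = 7 << 4;		/* 8th MSDU, steps of 0x10 */
	unsigned short frag = 0;		/* first (only) fragment */
	unsigned short seq_ctrl = seq_no | frag;
	unsigned int padpos = 26;		/* assumed QoS data header */
	unsigned int padsize = padpos & 3;	/* 2 bytes of padding */

	printf("seq_ctrl = 0x%04x\n", seq_ctrl);
	printf("padsize for %u-byte header = %u\n", padpos, padsize);
	return 0;
}
#endif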
1953
Sujithe8324352009-01-16 21:38:42 +05301954/*****************/
1955/* TX Completion */
1956/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001957
Sujithe8324352009-01-16 21:38:42 +05301958static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301959 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001960{
Sujithe8324352009-01-16 21:38:42 +05301961 struct ieee80211_hw *hw = sc->hw;
1962 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001963 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001964 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001965 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301966
Joe Perches226afe62010-12-02 19:12:37 -08001967 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301968
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301969 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301970 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301971
Felix Fietkau55797b12011-09-14 21:24:16 +02001972 if (!(tx_flags & ATH_TX_ERROR))
Sujithe8324352009-01-16 21:38:42 +05301973 /* Frame was ACKed */
1974 tx_info->flags |= IEEE80211_TX_STAT_ACK;
Sujithe8324352009-01-16 21:38:42 +05301975
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001976 padpos = ath9k_cmn_padpos(hdr->frame_control);
1977 padsize = padpos & 3;
1978 if (padsize && skb->len>padpos+padsize) {
Sujithe8324352009-01-16 21:38:42 +05301979 /*
1980 * Remove MAC header padding before giving the frame back to
1981 * mac80211.
1982 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001983 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301984 skb_pull(skb, padsize);
1985 }
1986
Sujith1b04b932010-01-08 10:36:05 +05301987 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1988 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001989 ath_dbg(common, ATH_DBG_PS,
1990 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301991 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1992 PS_WAIT_FOR_CAB |
1993 PS_WAIT_FOR_PSPOLL_DATA |
1994 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001995 }
1996
Felix Fietkau7545daf2011-01-24 19:23:16 +01001997 q = skb_get_queue_mapping(skb);
1998 if (txq == sc->tx.txq_map[q]) {
1999 spin_lock_bh(&txq->axq_lock);
2000 if (WARN_ON(--txq->pending_frames < 0))
2001 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01002002
Felix Fietkau7545daf2011-01-24 19:23:16 +01002003 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2004 ieee80211_wake_queue(sc->hw, q);
2005 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01002006 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01002007 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002008 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01002009
2010 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05302011}
2012
2013static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002014 struct ath_txq *txq, struct list_head *bf_q,
2015 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05302016{
2017 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05302018 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302019 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302020
Sujithe8324352009-01-16 21:38:42 +05302021 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302022 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05302023
Felix Fietkau55797b12011-09-14 21:24:16 +02002024 if (!txok)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302025 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302026
Ben Greearc1739eb32010-10-14 12:45:29 -07002027 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002028 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002029
2030 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302031 if (time_after(jiffies,
2032 bf->bf_state.bfs_paprd_timestamp +
2033 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002034 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002035 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002036 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002037 } else {
Felix Fietkau55797b12011-09-14 21:24:16 +02002038 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302039 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002040 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002041 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2042 * accidentally reference it later.
2043 */
2044 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302045
2046 /*
2047 * Return the list of ath_buf of this mpdu to free queue
2048 */
2049 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2050 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2051 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2052}
2053
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002054static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2055 struct ath_tx_status *ts, int nframes, int nbad,
2056 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302057{
Sujitha22be222009-03-30 15:28:36 +05302058 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302059 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302060 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002061 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002062 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302063 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302064
Sujith95e4acb2009-03-13 08:56:09 +05302065 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002066 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302067
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002068 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302069 WARN_ON(tx_rateindex >= hw->max_rates);
2070
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002071 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302072 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002073 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002074 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302075
Felix Fietkaub572d032010-11-14 15:20:07 +01002076 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002077
Felix Fietkaub572d032010-11-14 15:20:07 +01002078 tx_info->status.ampdu_len = nframes;
2079 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002080 }
2081
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002082 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302083 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002084 /*
 2085 * If an underrun error is seen, treat it as an excessive
 2086 * retry only if the max frame trigger level has been reached
2087 * (2 KB for single stream, and 4 KB for dual stream).
2088 * Adjust the long retry as if the frame was tried
2089 * hw->max_rate_tries times to affect how rate control updates
2090 * PER for the failed rate.
 2091 * In case of congestion on the bus, penalizing this type of
 2092 * underrun should help the hardware actually transmit new frames
2093 * successfully by eventually preferring slower rates.
2094 * This itself should also alleviate congestion on the bus.
2095 */
2096 if (ieee80211_is_data(hdr->frame_control) &&
2097 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2098 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002099 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002100 tx_info->status.rates[tx_rateindex].count =
2101 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302102 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302103
Felix Fietkau545750d2009-11-23 22:21:01 +01002104 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302105 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002106 tx_info->status.rates[i].idx = -1;
2107 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302108
Felix Fietkau78c46532010-06-25 01:26:16 +02002109 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302110}
2111
Felix Fietkaufce041b2011-05-19 12:20:25 +02002112static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2113 struct ath_tx_status *ts, struct ath_buf *bf,
2114 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302115 __releases(txq->axq_lock)
2116 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002117{
2118 int txok;
2119
2120 txq->axq_depth--;
2121 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2122 txq->axq_tx_inprogress = false;
2123 if (bf_is_ampdu_not_probing(bf))
2124 txq->axq_ampdu_depth--;
2125
2126 spin_unlock_bh(&txq->axq_lock);
2127
2128 if (!bf_isampdu(bf)) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002129 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2130 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2131 } else
2132 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2133
2134 spin_lock_bh(&txq->axq_lock);
2135
2136 if (sc->sc_flags & SC_OP_TXAGGR)
2137 ath_txq_schedule(sc, txq);
2138}
2139
Sujithc4288392008-11-18 09:09:30 +05302140static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141{
Sujithcbe61d82009-02-09 13:27:12 +05302142 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002143 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002144 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2145 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302146 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002147 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148 int status;
2149
Joe Perches226afe62010-12-02 19:12:37 -08002150 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2151 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2152 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002153
Felix Fietkaufce041b2011-05-19 12:20:25 +02002154 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002156 if (work_pending(&sc->hw_reset_work))
2157 break;
2158
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002159 if (list_empty(&txq->axq_q)) {
2160 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002161 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002162 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002163 break;
2164 }
2165 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2166
2167 /*
 2168 * There is a race condition where a BH gets scheduled
 2169 * after sw writes TxE and before hw re-loads the last
 2170 * descriptor to get the newly chained one.
2171 * Software must keep the last DONE descriptor as a
2172 * holding descriptor - software does so by marking
2173 * it with the STALE flag.
2174 */
2175 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302176 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002178 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002179 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002180
2181 bf = list_entry(bf_held->list.next, struct ath_buf,
2182 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002183 }
2184
2185 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302186 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002187
Felix Fietkau29bffa92010-03-29 20:14:23 -07002188 memset(&ts, 0, sizeof(ts));
2189 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002190 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002191 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002192
Ben Greear2dac4fb2011-01-09 23:11:45 -08002193 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002194
2195 /*
2196 * Remove ath_buf's of the same transmit unit from txq,
2197 * however leave the last descriptor back as the holding
2198 * descriptor for hw.
2199 */
Sujitha119cc42009-03-30 15:28:38 +05302200 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002201 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002202 if (!list_is_singular(&lastbf->list))
2203 list_cut_position(&bf_head,
2204 &txq->axq_q, lastbf->list.prev);
2205
Felix Fietkaufce041b2011-05-19 12:20:25 +02002206 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002207 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002208 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002209 }
Johannes Berge6a98542008-10-21 12:40:02 +02002210
Felix Fietkaufce041b2011-05-19 12:20:25 +02002211 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002212 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002213 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002214}
2215
Sujith305fe472009-07-23 15:32:29 +05302216static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002217{
2218 struct ath_softc *sc = container_of(work, struct ath_softc,
2219 tx_complete_work.work);
2220 struct ath_txq *txq;
2221 int i;
2222 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002223#ifdef CONFIG_ATH9K_DEBUGFS
2224 sc->tx_complete_poll_work_seen++;
2225#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002226
2227 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2228 if (ATH_TXQ_SETUP(sc, i)) {
2229 txq = &sc->tx.txq[i];
2230 spin_lock_bh(&txq->axq_lock);
2231 if (txq->axq_depth) {
2232 if (txq->axq_tx_inprogress) {
2233 needreset = true;
2234 spin_unlock_bh(&txq->axq_lock);
2235 break;
2236 } else {
2237 txq->axq_tx_inprogress = true;
2238 }
2239 }
2240 spin_unlock_bh(&txq->axq_lock);
2241 }
2242
2243 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002244 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2245 "tx hung, resetting the chip\n");
Felix Fietkau236de512011-09-03 01:40:25 +02002246 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002247 }
2248
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002249 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002250 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2251}
2252
2253
Sujithe8324352009-01-16 21:38:42 +05302254
2255void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002256{
Sujithe8324352009-01-16 21:38:42 +05302257 int i;
2258 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002259
Sujithe8324352009-01-16 21:38:42 +05302260 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002261
2262 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302263 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2264 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002265 }
2266}
2267
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002268void ath_tx_edma_tasklet(struct ath_softc *sc)
2269{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002270 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002271 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2272 struct ath_hw *ah = sc->sc_ah;
2273 struct ath_txq *txq;
2274 struct ath_buf *bf, *lastbf;
2275 struct list_head bf_head;
2276 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002277
2278 for (;;) {
Felix Fietkau236de512011-09-03 01:40:25 +02002279 if (work_pending(&sc->hw_reset_work))
2280 break;
2281
Felix Fietkaufce041b2011-05-19 12:20:25 +02002282 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002283 if (status == -EINPROGRESS)
2284 break;
2285 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002286 ath_dbg(common, ATH_DBG_XMIT,
2287 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002288 break;
2289 }
2290
2291 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002292 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002293 continue;
2294
Felix Fietkaufce041b2011-05-19 12:20:25 +02002295 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002296
2297 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002298
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002299 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2300 spin_unlock_bh(&txq->axq_lock);
2301 return;
2302 }
2303
2304 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2305 struct ath_buf, list);
2306 lastbf = bf->bf_lastbf;
2307
2308 INIT_LIST_HEAD(&bf_head);
2309 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2310 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002311
Felix Fietkaufce041b2011-05-19 12:20:25 +02002312 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2313 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002314
Felix Fietkaufce041b2011-05-19 12:20:25 +02002315 if (!list_empty(&txq->axq_q)) {
2316 struct list_head bf_q;
2317
2318 INIT_LIST_HEAD(&bf_q);
2319 txq->axq_link = NULL;
2320 list_splice_tail_init(&txq->axq_q, &bf_q);
2321 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2322 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002323 }
2324
Felix Fietkaufce041b2011-05-19 12:20:25 +02002325 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002326 spin_unlock_bh(&txq->axq_lock);
2327 }
2328}
2329
Sujithe8324352009-01-16 21:38:42 +05302330/*****************/
2331/* Init, Cleanup */
2332/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002333
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002334static int ath_txstatus_setup(struct ath_softc *sc, int size)
2335{
2336 struct ath_descdma *dd = &sc->txsdma;
2337 u8 txs_len = sc->sc_ah->caps.txs_len;
2338
2339 dd->dd_desc_len = size * txs_len;
2340 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2341 &dd->dd_desc_paddr, GFP_KERNEL);
2342 if (!dd->dd_desc)
2343 return -ENOMEM;
2344
2345 return 0;
2346}
2347
2348static int ath_tx_edma_init(struct ath_softc *sc)
2349{
2350 int err;
2351
2352 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2353 if (!err)
2354 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2355 sc->txsdma.dd_desc_paddr,
2356 ATH_TXSTATUS_RING_SIZE);
2357
2358 return err;
2359}
2360
2361static void ath_tx_edma_cleanup(struct ath_softc *sc)
2362{
2363 struct ath_descdma *dd = &sc->txsdma;
2364
2365 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2366 dd->dd_desc_paddr);
2367}
2368
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002369int ath_tx_init(struct ath_softc *sc, int nbufs)
2370{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002371 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002372 int error = 0;
2373
Sujith797fe5cb2009-03-30 15:28:45 +05302374 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002375
Sujith797fe5cb2009-03-30 15:28:45 +05302376 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002377 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302378 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002379 ath_err(common,
2380 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302381 goto err;
2382 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002383
Sujith797fe5cb2009-03-30 15:28:45 +05302384 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002385 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302386 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002387 ath_err(common,
2388 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302389 goto err;
2390 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002391
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002392 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2393
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002394 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2395 error = ath_tx_edma_init(sc);
2396 if (error)
2397 goto err;
2398 }
2399
Sujith797fe5cb2009-03-30 15:28:45 +05302400err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401 if (error != 0)
2402 ath_tx_cleanup(sc);
2403
2404 return error;
2405}
2406
Sujith797fe5cb2009-03-30 15:28:45 +05302407void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002408{
Sujithb77f4832008-12-07 21:44:03 +05302409 if (sc->beacon.bdma.dd_desc_len != 0)
2410 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002411
Sujithb77f4832008-12-07 21:44:03 +05302412 if (sc->tx.txdma.dd_desc_len != 0)
2413 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002414
2415 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2416 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417}
2418
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002419void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2420{
Sujithc5170162008-10-29 10:13:59 +05302421 struct ath_atx_tid *tid;
2422 struct ath_atx_ac *ac;
2423 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002424
Sujith8ee5afb2008-12-07 21:43:36 +05302425 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302426 tidno < WME_NUM_TID;
2427 tidno++, tid++) {
2428 tid->an = an;
2429 tid->tidno = tidno;
2430 tid->seq_start = tid->seq_next = 0;
2431 tid->baw_size = WME_MAX_BA;
2432 tid->baw_head = tid->baw_tail = 0;
2433 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302434 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302435 tid->state &= ~AGGR_CLEANUP;
Felix Fietkau56dc6332011-08-28 00:32:22 +02002436 __skb_queue_head_init(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302437 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302438 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302439 tid->state &= ~AGGR_ADDBA_COMPLETE;
2440 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302441 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002442
Sujith8ee5afb2008-12-07 21:43:36 +05302443 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302444 acno < WME_NUM_AC; acno++, ac++) {
2445 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002446 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302447 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002448 }
2449}
2450
Sujithb5aa9bf2008-10-29 10:13:31 +05302451void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002452{
Felix Fietkau2b409942010-07-07 19:42:08 +02002453 struct ath_atx_ac *ac;
2454 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002455 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002456 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302457
Felix Fietkau2b409942010-07-07 19:42:08 +02002458 for (tidno = 0, tid = &an->tid[tidno];
2459 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002460
Felix Fietkau2b409942010-07-07 19:42:08 +02002461 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002462 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002463
Felix Fietkau2b409942010-07-07 19:42:08 +02002464 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002465
Felix Fietkau2b409942010-07-07 19:42:08 +02002466 if (tid->sched) {
2467 list_del(&tid->list);
2468 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002469 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002470
2471 if (ac->sched) {
2472 list_del(&ac->list);
2473 tid->ac->sched = false;
2474 }
2475
2476 ath_tid_drain(sc, txq, tid);
2477 tid->state &= ~AGGR_ADDBA_COMPLETE;
2478 tid->state &= ~AGGR_CLEANUP;
2479
2480 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002481 }
2482}