/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME              16

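/*
 * Data bits carried per OFDM symbol for MCS 0-7 at 20/40 MHz channel width.
 * Higher MCS indices reuse row (mcs % 8), scaled by the stream count
 * (see ath_compute_num_delims() and ath_pkt_duration()).
 */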
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

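/*
 * Maximum frame length (bytes) that keeps the transmit duration within
 * 4 ms for each HT20/HT40 (long/short GI) mode, indexed by MCS 0-31 and
 * capped at 65532 bytes.  Used by ath_lookup_rate() to bound A-MPDU size.
 */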
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

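/*
 * Queue a TID on its access category's pending list and, if needed, the
 * access category on the hardware queue's schedule list, unless the TID
 * is paused or already scheduled.
 */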
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

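/*
 * Release a sequence number's slot in the block-ack window and slide the
 * window start forward past any subsequent slots that have already
 * completed.
 */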
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

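/*
 * Record a buffer in the block-ack window's circular slot array and extend
 * the window tail when the new entry lies beyond it; retried frames already
 * occupy a slot and are skipped.
 */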
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

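/*
 * Allocate a fresh ath_buf that reuses the original frame's skb, DMA
 * address and descriptor contents; used on the software-retry path when
 * the original (stale/holding) descriptor cannot be reused directly.
 */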
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

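/*
 * Process the tx status of a completed aggregate: subframes covered by the
 * block-ack bitmap (or a plain success for a non-aggregate) are completed,
 * failed subframes are software-retried or dropped after
 * ATH_MAX_SW_RETRIES, and anything still pending is re-queued on the TID
 * in its original order.
 */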
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * The AR5416 can become deaf/mute when a BA
			 * issue happens, and the chip then needs to be
			 * reset. The AP code may have synchronization
			 * issues when performing an internal reset in
			 * this routine, so only enable the reset in
			 * STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

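/*
 * Determine the aggregation limit (in bytes) for a frame based on the
 * lowest 4 ms frame length among its rate series, further clamped by the
 * peer's advertised maximum A-MPDU size and by BT coexistence.
 */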
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

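/*
 * Pull frames off the TID's software queue and chain them into a single
 * aggregate, stopping when the block-ack window, the byte limit from
 * ath_lookup_rate() or the subframe limit would be exceeded.
 */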
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}

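/*
 * Form aggregates from the TID's queue and hand them to the hardware
 * queue, falling back to a plain (non-aggregate) transmission when only
 * a single frame is available.
 */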
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

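/*
 * Tear down an aggregation session: drop software-retried frames still on
 * the TID queue, and either flush the TID immediately or mark it for
 * cleanup until the block-ack window drains.
 */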
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: it's based on the assumption that
			 * software retried frame will always stay
			 * at the head of software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

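/*
 * Round-robin scheduler: pick the next access category queued on this
 * hardware queue, service one of its TIDs via ath_tx_sched_aggr(), and
 * re-queue the AC/TID if frames remain pending.
 */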
Sujithe8324352009-01-16 21:38:42 +05301173void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1174{
1175 struct ath_atx_ac *ac;
1176 struct ath_atx_tid *tid;
1177
1178 if (list_empty(&txq->axq_acq))
1179 return;
1180
1181 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1182 list_del(&ac->list);
1183 ac->sched = false;
1184
1185 do {
1186 if (list_empty(&ac->tid_q))
1187 return;
1188
1189 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
1190 list_del(&tid->list);
1191 tid->sched = false;
1192
1193 if (tid->paused)
1194 continue;
1195
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04001196 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301197
1198 /*
1199 * add tid to round-robin queue if more frames
1200 * are pending for the tid
1201 */
1202 if (!list_empty(&tid->buf_q))
1203 ath_tx_queue_tid(txq, tid);
1204
1205 break;
1206 } while (!list_empty(&ac->tid_q));
1207
1208 if (!list_empty(&ac->tid_q)) {
1209 if (!ac->sched) {
1210 ac->sched = true;
1211 list_add_tail(&ac->list, &txq->axq_acq);
1212 }
1213 }
1214}
1215
1216int ath_tx_setup(struct ath_softc *sc, int haltype)
1217{
1218 struct ath_txq *txq;
1219
1220 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001221 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1222 "HAL AC %u out of range, max %zu!\n",
Sujithe8324352009-01-16 21:38:42 +05301223 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1224 return 0;
1225 }
1226 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1227 if (txq != NULL) {
1228 sc->tx.hwq_map[haltype] = txq->axq_qnum;
1229 return 1;
1230 } else
1231 return 0;
1232}
1233
1234/***********/
1235/* TX, DMA */
1236/***********/
1237
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001238/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001239 * Insert a chain of ath_buf (descriptors) on a txq and
1240 * assume the descriptors are already chained together by caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001241 */
Sujith102e0572008-10-29 10:15:16 +05301242static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1243 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001244{
Sujithcbe61d82009-02-09 13:27:12 +05301245 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001246 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001247 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +05301248
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001249 /*
1250 * Insert the frame on the outbound list and
1251 * pass it on to the hardware.
1252 */
1253
1254 if (list_empty(head))
1255 return;
1256
1257 bf = list_first_entry(head, struct ath_buf, list);
1258
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001259 ath_print(common, ATH_DBG_QUEUE,
1260 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001261
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001262 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1263 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1264 list_splice_tail_init(head, &txq->txq_fifo_pending);
1265 return;
1266 }
1267 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
1268 ath_print(common, ATH_DBG_XMIT,
1269 "Initializing tx fifo %d which "
1270 "is non-empty\n",
1271 txq->txq_headidx);
1272 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1273 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1274 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001275 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001276 ath_print(common, ATH_DBG_XMIT,
1277 "TXDP[%u] = %llx (%p)\n",
1278 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001279 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001280 list_splice_tail_init(head, &txq->axq_q);
1281
1282 if (txq->axq_link == NULL) {
1283 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1284 ath_print(common, ATH_DBG_XMIT,
1285 "TXDP[%u] = %llx (%p)\n",
1286 txq->axq_qnum, ito64(bf->bf_daddr),
1287 bf->bf_desc);
1288 } else {
1289 *txq->axq_link = bf->bf_daddr;
1290 ath_print(common, ATH_DBG_XMIT,
1291 "link[%u] (%p)=%llx (%p)\n",
1292 txq->axq_qnum, txq->axq_link,
1293 ito64(bf->bf_daddr), bf->bf_desc);
1294 }
1295 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1296 &txq->axq_link);
1297 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001298 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001299 txq->axq_depth++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001300}
1301
Sujithe8324352009-01-16 21:38:42 +05301302static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1303 struct list_head *bf_head,
1304 struct ath_tx_control *txctl)
1305{
1306 struct ath_buf *bf;
1307
Sujithe8324352009-01-16 21:38:42 +05301308 bf = list_first_entry(bf_head, struct ath_buf, list);
1309 bf->bf_state.bf_type |= BUF_AMPDU;
Sujithfec247c2009-07-27 12:08:16 +05301310 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
Sujithe8324352009-01-16 21:38:42 +05301311
1312 /*
1313 * Do not queue to h/w when any of the following conditions is true:
1314 * - there are pending frames in software queue
1315 * - the TID is currently paused for ADDBA/BAR request
1316 * - seqno is not within block-ack window
1317 * - h/w queue depth exceeds low water mark
1318 */
1319 if (!list_empty(&tid->buf_q) || tid->paused ||
1320 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1321 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001322 /*
Sujithe8324352009-01-16 21:38:42 +05301323 * Add this frame to software queue for scheduling later
1324 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001325 */
Sujithd43f30152009-01-16 21:38:53 +05301326 list_move_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301327 ath_tx_queue_tid(txctl->txq, tid);
1328 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001329 }
1330
Sujithe8324352009-01-16 21:38:42 +05301331 /* Add sub-frame to BAW */
1332 ath_tx_addto_baw(sc, tid, bf);
1333
1334 /* Queue to h/w without aggregation */
1335 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301336 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301337 ath_buf_set_rate(sc, bf);
1338 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
Sujithc4288392008-11-18 09:09:30 +05301339}
1340
Sujithc37452b2009-03-09 09:31:57 +05301341static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
1342 struct ath_atx_tid *tid,
1343 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001344{
Sujithe8324352009-01-16 21:38:42 +05301345 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001346
Sujithe8324352009-01-16 21:38:42 +05301347 bf = list_first_entry(bf_head, struct ath_buf, list);
1348 bf->bf_state.bf_type &= ~BUF_AMPDU;
1349
1350 /* update starting sequence number for subsequent ADDBA request */
1351 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1352
1353 bf->bf_nframes = 1;
Sujithd43f30152009-01-16 21:38:53 +05301354 bf->bf_lastbf = bf;
Sujithe8324352009-01-16 21:38:42 +05301355 ath_buf_set_rate(sc, bf);
1356 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301357 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001358}
1359
Sujithc37452b2009-03-09 09:31:57 +05301360static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1361 struct list_head *bf_head)
1362{
1363 struct ath_buf *bf;
1364
1365 bf = list_first_entry(bf_head, struct ath_buf, list);
1366
1367 bf->bf_lastbf = bf;
1368 bf->bf_nframes = 1;
1369 ath_buf_set_rate(sc, bf);
1370 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301371 TX_STAT_INC(txq->axq_qnum, queued);
Sujithc37452b2009-03-09 09:31:57 +05301372}
1373
Sujith528f0c62008-10-29 10:14:26 +05301374static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001375{
Sujith528f0c62008-10-29 10:14:26 +05301376 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001377 enum ath9k_pkt_type htype;
1378 __le16 fc;
1379
Sujith528f0c62008-10-29 10:14:26 +05301380 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001381 fc = hdr->frame_control;
1382
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001383 if (ieee80211_is_beacon(fc))
1384 htype = ATH9K_PKT_TYPE_BEACON;
1385 else if (ieee80211_is_probe_resp(fc))
1386 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1387 else if (ieee80211_is_atim(fc))
1388 htype = ATH9K_PKT_TYPE_ATIM;
1389 else if (ieee80211_is_pspoll(fc))
1390 htype = ATH9K_PKT_TYPE_PSPOLL;
1391 else
1392 htype = ATH9K_PKT_TYPE_NORMAL;
1393
1394 return htype;
1395}
1396
Sujith528f0c62008-10-29 10:14:26 +05301397static int get_hw_crypto_keytype(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001398{
Sujith528f0c62008-10-29 10:14:26 +05301399 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1400
1401 if (tx_info->control.hw_key) {
1402 if (tx_info->control.hw_key->alg == ALG_WEP)
1403 return ATH9K_KEY_TYPE_WEP;
1404 else if (tx_info->control.hw_key->alg == ALG_TKIP)
1405 return ATH9K_KEY_TYPE_TKIP;
1406 else if (tx_info->control.hw_key->alg == ALG_CCMP)
1407 return ATH9K_KEY_TYPE_AES;
1408 }
1409
1410 return ATH9K_KEY_TYPE_CLEAR;
1411}
1412
Sujith528f0c62008-10-29 10:14:26 +05301413static void assign_aggr_tid_seqno(struct sk_buff *skb,
1414 struct ath_buf *bf)
1415{
1416 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1417 struct ieee80211_hdr *hdr;
1418 struct ath_node *an;
1419 struct ath_atx_tid *tid;
1420 __le16 fc;
1421 u8 *qc;
1422
1423 if (!tx_info->control.sta)
1424 return;
1425
1426 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1427 hdr = (struct ieee80211_hdr *)skb->data;
1428 fc = hdr->frame_control;
1429
Sujith528f0c62008-10-29 10:14:26 +05301430 if (ieee80211_is_data_qos(fc)) {
1431 qc = ieee80211_get_qos_ctl(hdr);
1432 bf->bf_tidno = qc[0] & 0xf;
Sujith98deeea2008-08-11 14:05:46 +05301433 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001434
Sujithe8324352009-01-16 21:38:42 +05301435 /*
1436 * For HT capable stations, we save tidno for later use.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301437 * We also override seqno set by upper layer with the one
1438 * in tx aggregation state.
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301439 */
1440 tid = ATH_AN_2_TID(an, bf->bf_tidno);
Sujith17b182e2009-12-14 14:56:56 +05301441 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
Senthil Balasubramaniand3a1db12008-12-22 16:31:58 +05301442 bf->bf_seqno = tid->seq_next;
1443 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
Sujith528f0c62008-10-29 10:14:26 +05301444}
1445
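/*
 * Build the common TX descriptor flags for a frame: always clear the
 * destination mask and request a completion interrupt, and add
 * NOACK/LDPC when applicable.
 */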
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001446static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
Sujith528f0c62008-10-29 10:14:26 +05301447{
1448 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1449 int flags = 0;
1450
1451 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1452 flags |= ATH9K_TXDESC_INTREQ;
1453
1454 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1455 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301456
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001457 if (use_ldpc)
1458 flags |= ATH9K_TXDESC_LDPC;
1459
Sujith528f0c62008-10-29 10:14:26 +05301460 return flags;
1461}
1462
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001463/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001464 * rix - rate index
1465 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1466 * width - 0 for 20 MHz, 1 for 40 MHz
 1467 half_gi - use 3.6 us (short GI) instead of 4 us for symbol time
1468 */
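/*
 * Illustrative example (assumed values): a 1500-byte single MPDU sent
 * at MCS 7 (one stream) in a 40 MHz channel with the long GI gives
 * nbits = 1500 * 8 + 22 = 12022 and nsymbits = 540, so
 * nsymbols = (12022 + 540 - 1) / 540 = 23 and the data portion lasts
 * 23 * 4 us = 92 us; the preamble/training fields added below
 * contribute another 36 us.
 */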
Sujith102e0572008-10-29 10:15:16 +05301469static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1470 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001471{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001472 u32 nbits, nsymbits, duration, nsymbols;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001473 int streams, pktlen;
1474
Sujithcd3d39a2008-08-11 14:03:34 +05301475 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +05301476
1477 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001478 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001479 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001480 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001481 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1482
1483 if (!half_gi)
1484 duration = SYMBOL_TIME(nsymbols);
1485 else
1486 duration = SYMBOL_TIME_HALFGI(nsymbols);
1487
Sujithe63835b2008-11-18 09:07:53 +05301488 /* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001489 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301490
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001491 return duration;
1492}
1493
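/*
 * Fill in the four-entry hardware rate series from the mac80211 rate
 * table for this frame, compute per-series packet durations, set up
 * RTS/CTS protection flags and program the rate scenario into the
 * descriptor.
 */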
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001494static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1495{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001496 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001497 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301498 struct sk_buff *skb;
1499 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301500 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001501 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301502 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301503 int i, flags = 0;
1504 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301505 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301506
1507 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301508
Sujitha22be222009-03-30 15:28:36 +05301509 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301510 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301511 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301512 hdr = (struct ieee80211_hdr *)skb->data;
1513 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301514
Sujithc89424d2009-01-30 14:29:28 +05301515 /*
1516 * We check if Short Preamble is needed for the CTS rate by
1517 * checking the BSS's global flag.
1518 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1519 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001520 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1521 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301522 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001523 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001524
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001525 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001526 bool is_40, is_sgi, is_sp;
1527 int phy;
1528
Sujithe63835b2008-11-18 09:07:53 +05301529 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001530 continue;
1531
Sujitha8efee42008-11-18 09:07:30 +05301532 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301533 series[i].Tries = rates[i].count;
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001534 series[i].ChSel = common->tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001535
Felix Fietkau27032052010-01-17 21:08:50 +01001536 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1537 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301538 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001539 flags |= ATH9K_TXDESC_RTSENA;
1540 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1541 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1542 flags |= ATH9K_TXDESC_CTSENA;
1543 }
1544
Sujithc89424d2009-01-30 14:29:28 +05301545 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1546 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1547 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1548 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001549
Felix Fietkau545750d2009-11-23 22:21:01 +01001550 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1551 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1552 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1553
1554 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1555 /* MCS rates */
1556 series[i].Rate = rix | 0x80;
1557 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1558 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001559 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1560 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001561 continue;
1562 }
1563
 1564 /* legacy rates */
 1565 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
 1566 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
 1567 !(rate->flags & IEEE80211_RATE_ERP_G))
 1568 phy = WLAN_RC_PHY_CCK;
 1569 else
 1570 phy = WLAN_RC_PHY_OFDM;
 1571
1572 series[i].Rate = rate->hw_value;
1573 if (rate->hw_value_short) {
1574 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1575 series[i].Rate |= rate->hw_value_short;
1576 } else {
1577 is_sp = false;
1578 }
1579
1580 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1581 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001582 }
1583
Felix Fietkau27032052010-01-17 21:08:50 +01001584 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1585 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
1586 flags &= ~ATH9K_TXDESC_RTSENA;
1587
1588 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1589 if (flags & ATH9K_TXDESC_RTSENA)
1590 flags &= ~ATH9K_TXDESC_CTSENA;
1591
Sujithe63835b2008-11-18 09:07:53 +05301592 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301593 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1594 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301595 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301596 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301597
Sujith17d79042009-02-09 13:27:03 +05301598 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301599 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001600}
1601
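/*
 * Initialize an ath_buf for a frame handed down from mac80211: frame
 * length (minus header padding), TX flags, crypto key type, aggregation
 * TID/seqno where applicable, and the DMA mapping of the skb.
 */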
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001602static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
Sujithe8324352009-01-16 21:38:42 +05301603 struct sk_buff *skb,
1604 struct ath_tx_control *txctl)
1605{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001606 struct ath_wiphy *aphy = hw->priv;
1607 struct ath_softc *sc = aphy->sc;
Sujithe8324352009-01-16 21:38:42 +05301608 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1609 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301610 int hdrlen;
1611 __le16 fc;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001612 int padpos, padsize;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001613 bool use_ldpc = false;
Sujithe8324352009-01-16 21:38:42 +05301614
Felix Fietkau827e69b2009-11-15 23:09:25 +01001615 tx_info->pad[0] = 0;
1616 switch (txctl->frame_type) {
Pavel Roskinc81494d2010-03-31 18:05:25 -04001617 case ATH9K_IFT_NOT_INTERNAL:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001618 break;
Pavel Roskinc81494d2010-03-31 18:05:25 -04001619 case ATH9K_IFT_PAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001620 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1621 /* fall through */
Pavel Roskinc81494d2010-03-31 18:05:25 -04001622 case ATH9K_IFT_UNPAUSE:
Felix Fietkau827e69b2009-11-15 23:09:25 +01001623 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1624 break;
1625 }
Sujithe8324352009-01-16 21:38:42 +05301626 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1627 fc = hdr->frame_control;
1628
1629 ATH_TXBUF_RESET(bf);
1630
Felix Fietkau827e69b2009-11-15 23:09:25 +01001631 bf->aphy = aphy;
Benoit Papillault1bc14882009-11-24 15:49:18 +01001632 bf->bf_frmlen = skb->len + FCS_LEN;
1633 /* Remove the padding size from bf_frmlen, if any */
1634 padpos = ath9k_cmn_padpos(hdr->frame_control);
1635 padsize = padpos & 3;
 1636 if (padsize && skb->len > padpos + padsize) {
1637 bf->bf_frmlen -= padsize;
1638 }
Sujithe8324352009-01-16 21:38:42 +05301639
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001640 if (conf_is_ht(&hw->conf)) {
Sujithc656bbb2009-01-16 21:38:56 +05301641 bf->bf_state.bf_type |= BUF_HT;
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001642 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1643 use_ldpc = true;
1644 }
Sujithe8324352009-01-16 21:38:42 +05301645
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001646 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
Sujithe8324352009-01-16 21:38:42 +05301647
1648 bf->bf_keytype = get_hw_crypto_keytype(skb);
Sujithe8324352009-01-16 21:38:42 +05301649 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1650 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1651 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1652 } else {
1653 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1654 }
1655
Sujith17b182e2009-12-14 14:56:56 +05301656 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1657 (sc->sc_flags & SC_OP_TXAGGR))
Sujithe8324352009-01-16 21:38:42 +05301658 assign_aggr_tid_seqno(skb, bf);
1659
1660 bf->bf_mpdu = skb;
1661
1662 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
1663 skb->len, DMA_TO_DEVICE);
1664 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1665 bf->bf_mpdu = NULL;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001666 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1667 "dma_mapping_error() on TX\n");
Sujithe8324352009-01-16 21:38:42 +05301668 return -ENOMEM;
1669 }
1670
1671 bf->bf_buf_addr = bf->bf_dmacontext;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001672
1673 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1674 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1675 bf->bf_isnullfunc = true;
Sujith1b04b932010-01-08 10:36:05 +05301676 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05001677 } else
1678 bf->bf_isnullfunc = false;
1679
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001680 bf->bf_tx_aborted = false;
1681
Sujithe8324352009-01-16 21:38:42 +05301682 return 0;
1683}
1684
1685/* FIXME: tx power */
1686static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1687 struct ath_tx_control *txctl)
1688{
Sujitha22be222009-03-30 15:28:36 +05301689 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301690 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Sujithc37452b2009-03-09 09:31:57 +05301691 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +05301692 struct ath_node *an = NULL;
1693 struct list_head bf_head;
1694 struct ath_desc *ds;
1695 struct ath_atx_tid *tid;
Sujithcbe61d82009-02-09 13:27:12 +05301696 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301697 int frm_type;
Sujithc37452b2009-03-09 09:31:57 +05301698 __le16 fc;
Sujithe8324352009-01-16 21:38:42 +05301699
1700 frm_type = get_hw_packet_type(skb);
Sujithc37452b2009-03-09 09:31:57 +05301701 fc = hdr->frame_control;
Sujithe8324352009-01-16 21:38:42 +05301702
1703 INIT_LIST_HEAD(&bf_head);
1704 list_add_tail(&bf->list, &bf_head);
1705
1706 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001707 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301708
1709 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1710 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1711
1712 ath9k_hw_filltxdesc(ah, ds,
1713 skb->len, /* segment length */
1714 true, /* first segment */
1715 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001716 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001717 bf->bf_buf_addr,
1718 txctl->txq->axq_qnum);
Sujithe8324352009-01-16 21:38:42 +05301719
Sujithe8324352009-01-16 21:38:42 +05301720 spin_lock_bh(&txctl->txq->axq_lock);
1721
1722 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1723 tx_info->control.sta) {
1724 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1725 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1726
Sujithc37452b2009-03-09 09:31:57 +05301727 if (!ieee80211_is_data_qos(fc)) {
1728 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1729 goto tx_done;
1730 }
1731
Felix Fietkau4fdec032010-03-12 04:02:43 +01001732 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
Sujithe8324352009-01-16 21:38:42 +05301733 /*
1734 * Try aggregation if it's a unicast data frame
1735 * and the destination is HT capable.
1736 */
1737 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1738 } else {
1739 /*
1740 * Send this frame as regular when ADDBA
1741 * exchange is neither complete nor pending.
1742 */
Sujithc37452b2009-03-09 09:31:57 +05301743 ath_tx_send_ht_normal(sc, txctl->txq,
1744 tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301745 }
1746 } else {
Sujithc37452b2009-03-09 09:31:57 +05301747 ath_tx_send_normal(sc, txctl->txq, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301748 }
1749
Sujithc37452b2009-03-09 09:31:57 +05301750tx_done:
Sujithe8324352009-01-16 21:38:42 +05301751 spin_unlock_bh(&txctl->txq->axq_lock);
1752}
1753
1754/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001755int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301756 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001757{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001758 struct ath_wiphy *aphy = hw->priv;
1759 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001760 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau84642d62010-06-01 21:33:13 +02001761 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001762 struct ath_buf *bf;
Felix Fietkau97923b12010-06-12 00:33:55 -04001763 int q, r;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001764
Sujithe8324352009-01-16 21:38:42 +05301765 bf = ath_tx_get_buffer(sc);
1766 if (!bf) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001767 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
Sujithe8324352009-01-16 21:38:42 +05301768 return -1;
1769 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001770
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001771 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301772 if (unlikely(r)) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001773 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001774
Sujithe8324352009-01-16 21:38:42 +05301775 /* upon ath_tx_processq() this TX queue will be resumed; we
 1776 * guarantee this will happen by knowing beforehand that
 1777 * we will at least have to run TX completion on one buffer
 1778 * on the queue */
1779 spin_lock_bh(&txq->axq_lock);
Felix Fietkau84642d62010-06-01 21:33:13 +02001780 if (!txq->stopped && txq->axq_depth > 1) {
Luis R. Rodriguezf52de032009-11-02 17:09:12 -08001781 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
Sujithe8324352009-01-16 21:38:42 +05301782 txq->stopped = 1;
1783 }
1784 spin_unlock_bh(&txq->axq_lock);
1785
Felix Fietkau0a8cea82010-04-19 19:57:30 +02001786 ath_tx_return_buffer(sc, bf);
Sujithe8324352009-01-16 21:38:42 +05301787
1788 return r;
1789 }
1790
Felix Fietkau97923b12010-06-12 00:33:55 -04001791 q = skb_get_queue_mapping(skb);
1792 if (q >= 4)
1793 q = 0;
1794
1795 spin_lock_bh(&txq->axq_lock);
1796 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1797 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1798 txq->stopped = 1;
1799 }
1800 spin_unlock_bh(&txq->axq_lock);
1801
Sujithe8324352009-01-16 21:38:42 +05301802 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001803
1804 return 0;
1805}
1806
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001807void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001808{
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001809 struct ath_wiphy *aphy = hw->priv;
1810 struct ath_softc *sc = aphy->sc;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001811 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001812 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1813 int padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301814 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1815 struct ath_tx_control txctl;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001816
Sujithe8324352009-01-16 21:38:42 +05301817 memset(&txctl, 0, sizeof(struct ath_tx_control));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001818
Sujithe8324352009-01-16 21:38:42 +05301819 /*
1820 * As a temporary workaround, assign seq# here; this will likely need
1821 * to be cleaned up to work better with Beacon transmission and virtual
1822 * BSSes.
1823 */
1824 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
Sujithe8324352009-01-16 21:38:42 +05301825 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1826 sc->tx.seq_no += 0x10;
1827 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1828 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001829 }
1830
Sujithe8324352009-01-16 21:38:42 +05301831 /* Add the padding after the header if this is not already done */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001832 padpos = ath9k_cmn_padpos(hdr->frame_control);
1833 padsize = padpos & 3;
 1834 if (padsize && skb->len > padpos) {
Sujithe8324352009-01-16 21:38:42 +05301835 if (skb_headroom(skb) < padsize) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001836 ath_print(common, ATH_DBG_XMIT,
1837 "TX CABQ padding failed\n");
Sujithe8324352009-01-16 21:38:42 +05301838 dev_kfree_skb_any(skb);
1839 return;
1840 }
1841 skb_push(skb, padsize);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001842 memmove(skb->data, skb->data + padsize, padpos);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001843 }
1844
Sujithe8324352009-01-16 21:38:42 +05301845 txctl.txq = sc->beacon.cabq;
1846
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001847 ath_print(common, ATH_DBG_XMIT,
1848 "transmitting CABQ packet, skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301849
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001850 if (ath_tx_start(hw, skb, &txctl) != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001851 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
Sujithe8324352009-01-16 21:38:42 +05301852 goto exit;
1853 }
1854
1855 return;
1856exit:
1857 dev_kfree_skb_any(skb);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001858}
1859
Sujithe8324352009-01-16 21:38:42 +05301860/*****************/
1861/* TX Completion */
1862/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001863
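/*
 * Hand a completed frame back to mac80211: translate driver TX flags
 * into mac80211 status flags, strip the MAC header padding again, and
 * update the power-save and pending-frame accounting.
 */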
Sujithe8324352009-01-16 21:38:42 +05301864static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau827e69b2009-11-15 23:09:25 +01001865 struct ath_wiphy *aphy, int tx_flags)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001866{
Sujithe8324352009-01-16 21:38:42 +05301867 struct ieee80211_hw *hw = sc->hw;
1868 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001869 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001870 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001871 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301872
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001873 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301874
Felix Fietkau827e69b2009-11-15 23:09:25 +01001875 if (aphy)
1876 hw = aphy->hw;
Sujithe8324352009-01-16 21:38:42 +05301877
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301878 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301879 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301880
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301881 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301882 /* Frame was ACKed */
1883 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1884 }
1885
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001886 padpos = ath9k_cmn_padpos(hdr->frame_control);
1887 padsize = padpos & 3;
 1888 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301889 /*
1890 * Remove MAC header padding before giving the frame back to
1891 * mac80211.
1892 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001893 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301894 skb_pull(skb, padsize);
1895 }
1896
Sujith1b04b932010-01-08 10:36:05 +05301897 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1898 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001899 ath_print(common, ATH_DBG_PS,
1900 "Going back to sleep after having "
Pavel Roskinf643e512010-01-29 17:22:12 -05001901 "received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301902 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1903 PS_WAIT_FOR_CAB |
1904 PS_WAIT_FOR_PSPOLL_DATA |
1905 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001906 }
1907
Felix Fietkau827e69b2009-11-15 23:09:25 +01001908 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
Jouni Malinenf0ed85c62009-03-03 19:23:31 +02001909 ath9k_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001910 else {
1911 q = skb_get_queue_mapping(skb);
1912 if (q >= 4)
1913 q = 0;
1914
1915 if (--sc->tx.pending_frames[q] < 0)
1916 sc->tx.pending_frames[q] = 0;
1917
Felix Fietkau827e69b2009-11-15 23:09:25 +01001918 ieee80211_tx_status(hw, skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001919 }
Sujithe8324352009-01-16 21:38:42 +05301920}
1921
1922static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001923 struct ath_txq *txq, struct list_head *bf_q,
1924 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301925{
1926 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301927 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301928 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301929
Sujithe8324352009-01-16 21:38:42 +05301930 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301931 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301932
1933 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301934 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301935
1936 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301937 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301938 }
1939
1940 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001941 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001942 ath_debug_stat_tx(sc, txq, bf, ts);
Sujithe8324352009-01-16 21:38:42 +05301943
1944 /*
 1945 * Return the list of ath_bufs of this mpdu to the free queue
1946 */
1947 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1948 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1949 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1950}
1951
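/*
 * Count the subframes of this (possibly aggregated) buffer chain that
 * were not acknowledged, based on the block-ack bitmap in the TX
 * status.
 */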
1952static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001953 struct ath_tx_status *ts, int txok)
Sujithe8324352009-01-16 21:38:42 +05301954{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001955 u16 seq_st = 0;
1956 u32 ba[WME_BA_BMP_SIZE >> 5];
Sujithe8324352009-01-16 21:38:42 +05301957 int ba_index;
1958 int nbad = 0;
1959 int isaggr = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001960
Vasanthakumar Thiagarajan7c9fd602010-05-26 19:06:53 -07001961 if (bf->bf_lastbf->bf_tx_aborted)
Sujithe8324352009-01-16 21:38:42 +05301962 return 0;
Sujith528f0c62008-10-29 10:14:26 +05301963
Sujithcd3d39a2008-08-11 14:03:34 +05301964 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001965 if (isaggr) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001966 seq_st = ts->ts_seqnum;
1967 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001968 }
1969
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001970 while (bf) {
Sujithe8324352009-01-16 21:38:42 +05301971 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1972 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1973 nbad++;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001974
Sujithe8324352009-01-16 21:38:42 +05301975 bf = bf->bf_next;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001976 }
1977
Sujithe8324352009-01-16 21:38:42 +05301978 return nbad;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001979}
1980
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001981static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301982 int nbad, int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301983{
Sujitha22be222009-03-30 15:28:36 +05301984 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301985 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301986 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau827e69b2009-11-15 23:09:25 +01001987 struct ieee80211_hw *hw = bf->aphy->hw;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301988 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301989
Sujith95e4acb2009-03-13 08:56:09 +05301990 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001991 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301992
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001993 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301994 WARN_ON(tx_rateindex >= hw->max_rates);
1995
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001996 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301997 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Felix Fietkaud9698472010-03-01 13:32:11 +01001998 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
1999 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302000
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002001 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302002 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Sujith254ad0f2009-02-04 08:10:19 +05302003 if (ieee80211_is_data(hdr->frame_control)) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002004 if (ts->ts_flags &
Felix Fietkau827e69b2009-11-15 23:09:25 +01002005 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
2006 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002007 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2008 (ts->ts_status & ATH9K_TXERR_FIFO))
Felix Fietkau827e69b2009-11-15 23:09:25 +01002009 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2010 tx_info->status.ampdu_len = bf->bf_nframes;
2011 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
Sujithc4288392008-11-18 09:09:30 +05302012 }
2013 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302014
Felix Fietkau545750d2009-11-23 22:21:01 +01002015 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302016 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002017 tx_info->status.rates[i].idx = -1;
2018 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302019
2020 tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
Sujithc4288392008-11-18 09:09:30 +05302021}
2022
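/*
 * Restart the mac80211 queue mapped to this hardware TX queue once the
 * number of pending frames has dropped below the threshold again.
 */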
Sujith059d8062009-01-16 21:38:49 +05302023static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2024{
2025 int qnum;
2026
Felix Fietkau97923b12010-06-12 00:33:55 -04002027 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2028 if (qnum == -1)
2029 return;
2030
Sujith059d8062009-01-16 21:38:49 +05302031 spin_lock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04002032 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
2033 ath_mac80211_start_queue(sc, qnum);
2034 txq->stopped = 0;
Sujith059d8062009-01-16 21:38:49 +05302035 }
2036 spin_unlock_bh(&txq->axq_lock);
2037}
2038
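/*
 * Reap completed frames from a legacy (non-EDMA) TX queue: walk the
 * descriptor list, skip the holding descriptor, collect the TX status
 * and complete either the single frame or the whole aggregate.
 */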
Sujithc4288392008-11-18 09:09:30 +05302039static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002040{
Sujithcbe61d82009-02-09 13:27:12 +05302041 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002042 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002043 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2044 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302045 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002046 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302047 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002048 int status;
2049
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002050 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2051 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2052 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002053
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002054 for (;;) {
2055 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002056 if (list_empty(&txq->axq_q)) {
2057 txq->axq_link = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002058 spin_unlock_bh(&txq->axq_lock);
2059 break;
2060 }
2061 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2062
2063 /*
 2064 * There is a race condition where a BH gets scheduled
 2065 * after sw writes TxE and before hw re-loads the last
 2066 * descriptor to get the newly chained one.
2067 * Software must keep the last DONE descriptor as a
2068 * holding descriptor - software does so by marking
2069 * it with the STALE flag.
2070 */
2071 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302072 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002073 bf_held = bf;
2074 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302075 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076 break;
2077 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002078 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302079 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002080 }
2081 }
2082
2083 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302084 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002085
Felix Fietkau29bffa92010-03-29 20:14:23 -07002086 memset(&ts, 0, sizeof(ts));
2087 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002088 if (status == -EINPROGRESS) {
2089 spin_unlock_bh(&txq->axq_lock);
2090 break;
2091 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002092
2093 /*
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002094 * We now know the nullfunc frame has been ACKed so we
2095 * can disable RX.
2096 */
2097 if (bf->bf_isnullfunc &&
Felix Fietkau29bffa92010-03-29 20:14:23 -07002098 (ts.ts_status & ATH9K_TX_ACKED)) {
Senthil Balasubramanian3f7c5c12010-02-03 22:51:13 +05302099 if ((sc->ps_flags & PS_ENABLED))
2100 ath9k_enable_ps(sc);
2101 else
Sujith1b04b932010-01-08 10:36:05 +05302102 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
Luis R. Rodrigueze7824a52009-11-24 02:53:25 -05002103 }
2104
2105 /*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002106 * Remove ath_bufs of the same transmit unit from txq,
 2107 * but leave the last descriptor behind as the holding
 2108 * descriptor for hw.
2109 */
Sujitha119cc42009-03-30 15:28:38 +05302110 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002111 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002112 if (!list_is_singular(&lastbf->list))
2113 list_cut_position(&bf_head,
2114 &txq->axq_q, lastbf->list.prev);
2115
2116 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002117 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002118 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002119 if (bf_held)
2120 list_del(&bf_held->list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002121 spin_unlock_bh(&txq->axq_lock);
2122
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002123 if (bf_held)
2124 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002125
Sujithcd3d39a2008-08-11 14:03:34 +05302126 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002127 /*
2128 * This frame is sent out as a single frame.
2129 * Use hardware retry status for this frame.
2130 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002131 bf->bf_retries = ts.ts_longretry;
2132 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302133 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002134 ath_tx_rc_status(bf, &ts, 0, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002135 }
Johannes Berge6a98542008-10-21 12:40:02 +02002136
Sujithcd3d39a2008-08-11 14:03:34 +05302137 if (bf_isampdu(bf))
Felix Fietkau29bffa92010-03-29 20:14:23 -07002138 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002139 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002140 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002141
Sujith059d8062009-01-16 21:38:49 +05302142 ath_wake_mac80211_queue(sc, txq);
2143
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002144 spin_lock_bh(&txq->axq_lock);
Sujith672840a2008-08-11 14:05:08 +05302145 if (sc->sc_flags & SC_OP_TXAGGR)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002146 ath_txq_schedule(sc, txq);
2147 spin_unlock_bh(&txq->axq_lock);
2148 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002149}
2150
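/*
 * Watchdog for stuck TX queues: if a queue still shows the same
 * in-progress work on two consecutive polls, assume the hardware hung
 * and reset the chip.
 */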
Sujith305fe472009-07-23 15:32:29 +05302151static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002152{
2153 struct ath_softc *sc = container_of(work, struct ath_softc,
2154 tx_complete_work.work);
2155 struct ath_txq *txq;
2156 int i;
2157 bool needreset = false;
2158
2159 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2160 if (ATH_TXQ_SETUP(sc, i)) {
2161 txq = &sc->tx.txq[i];
2162 spin_lock_bh(&txq->axq_lock);
2163 if (txq->axq_depth) {
2164 if (txq->axq_tx_inprogress) {
2165 needreset = true;
2166 spin_unlock_bh(&txq->axq_lock);
2167 break;
2168 } else {
2169 txq->axq_tx_inprogress = true;
2170 }
2171 }
2172 spin_unlock_bh(&txq->axq_lock);
2173 }
2174
2175 if (needreset) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002176 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2177 "tx hung, resetting the chip\n");
Sujith332c5562009-10-09 09:51:28 +05302178 ath9k_ps_wakeup(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002179 ath_reset(sc, false);
Sujith332c5562009-10-09 09:51:28 +05302180 ath9k_ps_restore(sc);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002181 }
2182
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002183 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002184 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2185}
2186
2187
Sujithe8324352009-01-16 21:38:42 +05302188
2189void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002190{
Sujithe8324352009-01-16 21:38:42 +05302191 int i;
2192 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002193
Sujithe8324352009-01-16 21:38:42 +05302194 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002195
2196 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302197 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2198 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002199 }
2200}
2201
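/*
 * TX completion handling for EDMA-capable chips: TX status is read
 * from the status ring rather than from the frame descriptors, and
 * completed buffers are taken from the per-queue FIFO lists.
 */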
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002202void ath_tx_edma_tasklet(struct ath_softc *sc)
2203{
2204 struct ath_tx_status txs;
2205 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2206 struct ath_hw *ah = sc->sc_ah;
2207 struct ath_txq *txq;
2208 struct ath_buf *bf, *lastbf;
2209 struct list_head bf_head;
2210 int status;
2211 int txok;
2212
2213 for (;;) {
2214 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2215 if (status == -EINPROGRESS)
2216 break;
2217 if (status == -EIO) {
2218 ath_print(common, ATH_DBG_XMIT,
2219 "Error processing tx status\n");
2220 break;
2221 }
2222
2223 /* Skip beacon completions */
2224 if (txs.qid == sc->beacon.beaconq)
2225 continue;
2226
2227 txq = &sc->tx.txq[txs.qid];
2228
2229 spin_lock_bh(&txq->axq_lock);
2230 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2231 spin_unlock_bh(&txq->axq_lock);
2232 return;
2233 }
2234
2235 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2236 struct ath_buf, list);
2237 lastbf = bf->bf_lastbf;
2238
2239 INIT_LIST_HEAD(&bf_head);
2240 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2241 &lastbf->list);
2242 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2243 txq->axq_depth--;
2244 txq->axq_tx_inprogress = false;
2245 spin_unlock_bh(&txq->axq_lock);
2246
2247 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2248
Vasanthakumar Thiagarajande0f6482010-05-17 18:57:54 -07002249 /*
2250 * Make sure null func frame is acked before configuring
2251 * hw into ps mode.
2252 */
2253 if (bf->bf_isnullfunc && txok) {
2254 if ((sc->ps_flags & PS_ENABLED))
2255 ath9k_enable_ps(sc);
2256 else
2257 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2258 }
2259
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002260 if (!bf_isampdu(bf)) {
2261 bf->bf_retries = txs.ts_longretry;
2262 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2263 bf->bf_state.bf_type |= BUF_XRETRY;
2264 ath_tx_rc_status(bf, &txs, 0, txok, true);
2265 }
2266
2267 if (bf_isampdu(bf))
2268 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2269 else
2270 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2271 &txs, txok, 0);
2272
Felix Fietkau7f9f3602010-04-26 15:04:36 -04002273 ath_wake_mac80211_queue(sc, txq);
2274
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002275 spin_lock_bh(&txq->axq_lock);
2276 if (!list_empty(&txq->txq_fifo_pending)) {
2277 INIT_LIST_HEAD(&bf_head);
2278 bf = list_first_entry(&txq->txq_fifo_pending,
2279 struct ath_buf, list);
2280 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2281 &bf->bf_lastbf->list);
2282 ath_tx_txqaddbuf(sc, txq, &bf_head);
2283 } else if (sc->sc_flags & SC_OP_TXAGGR)
2284 ath_txq_schedule(sc, txq);
2285 spin_unlock_bh(&txq->axq_lock);
2286 }
2287}
2288
Sujithe8324352009-01-16 21:38:42 +05302289/*****************/
2290/* Init, Cleanup */
2291/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002292
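/*
 * Allocate the coherent DMA ring used by EDMA hardware to report TX
 * status, one entry of caps.txs_len bytes per ring slot.
 */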
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002293static int ath_txstatus_setup(struct ath_softc *sc, int size)
2294{
2295 struct ath_descdma *dd = &sc->txsdma;
2296 u8 txs_len = sc->sc_ah->caps.txs_len;
2297
2298 dd->dd_desc_len = size * txs_len;
2299 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2300 &dd->dd_desc_paddr, GFP_KERNEL);
2301 if (!dd->dd_desc)
2302 return -ENOMEM;
2303
2304 return 0;
2305}
2306
2307static int ath_tx_edma_init(struct ath_softc *sc)
2308{
2309 int err;
2310
2311 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2312 if (!err)
2313 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2314 sc->txsdma.dd_desc_paddr,
2315 ATH_TXSTATUS_RING_SIZE);
2316
2317 return err;
2318}
2319
2320static void ath_tx_edma_cleanup(struct ath_softc *sc)
2321{
2322 struct ath_descdma *dd = &sc->txsdma;
2323
2324 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2325 dd->dd_desc_paddr);
2326}
2327
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002328int ath_tx_init(struct ath_softc *sc, int nbufs)
2329{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002330 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002331 int error = 0;
2332
Sujith797fe5cb2009-03-30 15:28:45 +05302333 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002334
Sujith797fe5cb2009-03-30 15:28:45 +05302335 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002336 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302337 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002338 ath_print(common, ATH_DBG_FATAL,
2339 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302340 goto err;
2341 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002342
Sujith797fe5cb2009-03-30 15:28:45 +05302343 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002344 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302345 if (error != 0) {
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002346 ath_print(common, ATH_DBG_FATAL,
2347 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302348 goto err;
2349 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002350
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002351 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2352
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002353 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2354 error = ath_tx_edma_init(sc);
2355 if (error)
2356 goto err;
2357 }
2358
Sujith797fe5cb2009-03-30 15:28:45 +05302359err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002360 if (error != 0)
2361 ath_tx_cleanup(sc);
2362
2363 return error;
2364}
2365
Sujith797fe5cb2009-03-30 15:28:45 +05302366void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002367{
Sujithb77f4832008-12-07 21:44:03 +05302368 if (sc->beacon.bdma.dd_desc_len != 0)
2369 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002370
Sujithb77f4832008-12-07 21:44:03 +05302371 if (sc->tx.txdma.dd_desc_len != 0)
2372 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002373
2374 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2375 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376}
2377
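/*
 * Initialize the per-station TID and access-category state used for
 * aggregation scheduling.
 */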
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002378void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2379{
Sujithc5170162008-10-29 10:13:59 +05302380 struct ath_atx_tid *tid;
2381 struct ath_atx_ac *ac;
2382 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002383
Sujith8ee5afb2008-12-07 21:43:36 +05302384 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302385 tidno < WME_NUM_TID;
2386 tidno++, tid++) {
2387 tid->an = an;
2388 tid->tidno = tidno;
2389 tid->seq_start = tid->seq_next = 0;
2390 tid->baw_size = WME_MAX_BA;
2391 tid->baw_head = tid->baw_tail = 0;
2392 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302393 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302394 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302395 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302396 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302397 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302398 tid->state &= ~AGGR_ADDBA_COMPLETE;
2399 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302400 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002401
Sujith8ee5afb2008-12-07 21:43:36 +05302402 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302403 acno < WME_NUM_AC; acno++, ac++) {
2404 ac->sched = false;
Felix Fietkau1d2231e2010-06-12 00:33:51 -04002405 ac->qnum = sc->tx.hwq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302406 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002407 }
2408}
2409
Sujithb5aa9bf2008-10-29 10:13:31 +05302410void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002411{
2412 int i;
2413 struct ath_atx_ac *ac, *ac_tmp;
2414 struct ath_atx_tid *tid, *tid_tmp;
2415 struct ath_txq *txq;
Sujithe8324352009-01-16 21:38:42 +05302416
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2418 if (ATH_TXQ_SETUP(sc, i)) {
Sujithb77f4832008-12-07 21:44:03 +05302419 txq = &sc->tx.txq[i];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420
Ming Leia9f042c2010-02-28 00:56:24 +08002421 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002422
2423 list_for_each_entry_safe(ac,
2424 ac_tmp, &txq->axq_acq, list) {
2425 tid = list_first_entry(&ac->tid_q,
2426 struct ath_atx_tid, list);
2427 if (tid && tid->an != an)
2428 continue;
2429 list_del(&ac->list);
2430 ac->sched = false;
2431
2432 list_for_each_entry_safe(tid,
2433 tid_tmp, &ac->tid_q, list) {
2434 list_del(&tid->list);
2435 tid->sched = false;
Sujithb5aa9bf2008-10-29 10:13:31 +05302436 ath_tid_drain(sc, txq, tid);
Sujitha37c2c72008-10-29 10:15:40 +05302437 tid->state &= ~AGGR_ADDBA_COMPLETE;
Sujitha37c2c72008-10-29 10:15:40 +05302438 tid->state &= ~AGGR_CLEANUP;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002439 }
2440 }
2441
Ming Leia9f042c2010-02-28 00:56:24 +08002442 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002443 }
2444 }
2445}