/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
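/*
 * Note: with a short (half) guard interval the OFDM symbol time is 3.6 us
 * instead of 4 us, which is where the *18/5 and *5/18 scaling in the
 * _HALFGI variants above comes from.
 */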
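/*
 * Data bits per OFDM symbol for a single spatial stream (MCS 0-7), for
 * 20 and 40 MHz channels.  ath_compute_num_delims() scales these by the
 * number of spatial streams.
 */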
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

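/*
 * Largest aggregate size (in bytes) that fits in roughly 4 ms of airtime,
 * indexed by [HT mode][MCS index].  Values are clamped to 65532, just
 * below the 16-bit hardware aggregate length limit (see ath_lookup_rate()).
 */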
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

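/*
 * ath_tx_update_baw() marks a sequence number as completed in the
 * block-ack window bitmap and slides the window start past any leading
 * completed entries; ath_tx_addto_baw() records a newly queued subframe
 * in the same window, extending the tail if needed.
 */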
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries)
			ath_tx_update_baw(sc, tid, fi->seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

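/*
 * Tx-status processing for an aggregate: complete the subframes reported
 * as received by the block-ack, software-retry the un-acked ones (within
 * the retry limit), and keep the block-ack window in sync.
 */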
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) && retry) {
				if (fi->retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, fi->seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, fi->seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		spin_unlock_bh(&sc->sc_pcu_lock);
		ath_reset(sc, false);
		spin_lock_bh(&sc->sc_pcu_lock);
	}
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

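/*
 * Pull as many frames from the tid's queue as the block-ack window, the
 * rate-derived length limit and the subframe limit allow, chain them into
 * one aggregate and report why aggregation stopped.
 */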
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

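/*
 * Drain the tid's queue into the hardware: form aggregates (or send a
 * lone frame unaggregated) until the block-ack window closes or the txq
 * holds enough pending aggregates.
 */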
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

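/*
 * Allocate and initialize a hardware tx queue of the requested type;
 * returns NULL if no suitable hardware queue is available.
 */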
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

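/*
 * Stop DMA on the beacon and all data queues and drain whatever is still
 * queued on them; returns false if the hardware failed to stop tx DMA.
 */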
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_dbg(common, ATH_DBG_XMIT,
				"Initializing tx fifo %d which is non-empty\n",
				txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			TX_STAT_INC(txq->axq_qnum, puttxbuf);
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
				txq->axq_qnum, ito64(bf->bf_daddr),
				bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct ath_buf *bf, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301363{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001364 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
Felix Fietkau04caf862010-11-14 15:20:12 +01001365 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301366
Sujithe8324352009-01-16 21:38:42 +05301367 bf->bf_state.bf_type |= BUF_AMPDU;
1368
1369 /*
1370 * Do not queue to h/w when any of the following conditions is true:
1371 * - there are pending frames in software queue
1372 * - the TID is currently paused for ADDBA/BAR request
1373 * - seqno is not within block-ack window
1374 * - h/w queue depth exceeds low water mark
1375 */
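 /*
  * Illustrative example of the block-ack window check below (numbers
  * are assumed, not taken from a trace): with tid->seq_start == 100 and
  * tid->baw_size == 64, BAW_WITHIN() accepts seqnos 100..163, so a
  * frame with seqno 164 is buffered in tid->buf_q instead of being
  * handed to the hardware immediately.
  */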
1376 if (!list_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001377 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001378 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001379 /*
Sujithe8324352009-01-16 21:38:42 +05301380 * Add this frame to software queue for scheduling later
1381 * for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001382 */
Ben Greearbda8add2011-01-09 23:11:48 -08001383 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau04caf862010-11-14 15:20:12 +01001384 list_add_tail(&bf->list, &tid->buf_q);
Sujithe8324352009-01-16 21:38:42 +05301385 ath_tx_queue_tid(txctl->txq, tid);
1386 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001387 }
1388
Felix Fietkau04caf862010-11-14 15:20:12 +01001389 INIT_LIST_HEAD(&bf_head);
1390 list_add(&bf->list, &bf_head);
1391
Sujithe8324352009-01-16 21:38:42 +05301392 /* Add sub-frame to BAW */
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001393 if (!fi->retries)
1394 ath_tx_addto_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +05301395
1396 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001397 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301398 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001399 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau04caf862010-11-14 15:20:12 +01001400 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
Sujithc4288392008-11-18 09:09:30 +05301401}
1402
Felix Fietkau82b873a2010-11-11 03:18:37 +01001403static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1404 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001405 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001406{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001407 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301408 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001409
Sujithe8324352009-01-16 21:38:42 +05301410 bf = list_first_entry(bf_head, struct ath_buf, list);
1411 bf->bf_state.bf_type &= ~BUF_AMPDU;
1412
1413 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001414 if (tid)
1415 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301416
Sujithd43f30152009-01-16 21:38:53 +05301417 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001418 fi = get_frame_info(bf->bf_mpdu);
1419 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301420 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301421 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001422}
1423
Sujith528f0c62008-10-29 10:14:26 +05301424static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001425{
Sujith528f0c62008-10-29 10:14:26 +05301426 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001427 enum ath9k_pkt_type htype;
1428 __le16 fc;
1429
Sujith528f0c62008-10-29 10:14:26 +05301430 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001431 fc = hdr->frame_control;
1432
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001433 if (ieee80211_is_beacon(fc))
1434 htype = ATH9K_PKT_TYPE_BEACON;
1435 else if (ieee80211_is_probe_resp(fc))
1436 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1437 else if (ieee80211_is_atim(fc))
1438 htype = ATH9K_PKT_TYPE_ATIM;
1439 else if (ieee80211_is_pspoll(fc))
1440 htype = ATH9K_PKT_TYPE_PSPOLL;
1441 else
1442 htype = ATH9K_PKT_TYPE_NORMAL;
1443
1444 return htype;
1445}
1446
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001447static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1448 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301449{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001450 struct ath_softc *sc = hw->priv;
Sujith528f0c62008-10-29 10:14:26 +05301451 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001452 struct ieee80211_sta *sta = tx_info->control.sta;
1453 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Sujith528f0c62008-10-29 10:14:26 +05301454 struct ieee80211_hdr *hdr;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001455 struct ath_frame_info *fi = get_frame_info(skb);
Sujith528f0c62008-10-29 10:14:26 +05301456 struct ath_node *an;
1457 struct ath_atx_tid *tid;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001458 enum ath9k_key_type keytype;
1459 u16 seqno = 0;
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001460 u8 tidno;
Sujith528f0c62008-10-29 10:14:26 +05301461
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001462 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301463
Sujith528f0c62008-10-29 10:14:26 +05301464 hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001465 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1466 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001467
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001468 an = (struct ath_node *) sta->drv_priv;
1469 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1470
1471 /*
1472 * Override seqno set by upper layer with the one
1473 * in tx aggregation state.
1474 */
1475 tid = ATH_AN_2_TID(an, tidno);
1476 seqno = tid->seq_next;
1477 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1478 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1479 }
1480
1481 memset(fi, 0, sizeof(*fi));
1482 if (hw_key)
1483 fi->keyix = hw_key->hw_key_idx;
1484 else
1485 fi->keyix = ATH9K_TXKEYIX_INVALID;
1486 fi->keytype = keytype;
1487 fi->framelen = framelen;
1488 fi->seqno = seqno;
Sujith528f0c62008-10-29 10:14:26 +05301489}
1490
Felix Fietkau82b873a2010-11-11 03:18:37 +01001491static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301492{
1493 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1494 int flags = 0;
1495
1496 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1497 flags |= ATH9K_TXDESC_INTREQ;
1498
1499 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1500 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301501
Felix Fietkau82b873a2010-11-11 03:18:37 +01001502 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001503 flags |= ATH9K_TXDESC_LDPC;
1504
Sujith528f0c62008-10-29 10:14:26 +05301505 return flags;
1506}
1507
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001508/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001509 * rix - rate index
1510 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1511 * width - 0 for 20 MHz, 1 for 40 MHz
1512 * half_gi - 1 to use the 3.6 us (short GI) symbol time instead of 4 us
1513 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001514static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301515 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001516{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001517 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001518 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301519
1520 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001521 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001522 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001523 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001524 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1525
1526 if (!half_gi)
1527 duration = SYMBOL_TIME(nsymbols);
1528 else
1529 duration = SYMBOL_TIME_HALFGI(nsymbols);
1530
Sujithe63835b2008-11-18 09:07:53 +05301531 /* add up duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001532 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301533
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001534 return duration;
1535}
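/*
 * Worked example of the math above (an assumed frame, for illustration
 * only): MCS 7, 20 MHz, one stream, pktlen = 1500 bytes gives
 * nbits = 1500 * 8 + 22 = 12022, nsymbits = 260, so nsymbols = 47.
 * With the long GI that is SYMBOL_TIME(47) = 188 us, plus
 * 8 + 8 + 4 + 8 + 4 + 4 = 36 us of preamble/training fields, i.e. a
 * PktDuration of roughly 224 us (about 206 us with the short GI).
 */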
1536
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301537u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1538{
1539 struct ath_hw *ah = sc->sc_ah;
1540 struct ath9k_channel *curchan = ah->curchan;
1541 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1542 (curchan->channelFlags & CHANNEL_5GHZ) &&
1543 (chainmask == 0x7) && (rate < 0x90))
1544 return 0x3;
1545 else
1546 return chainmask;
1547}
1548
Felix Fietkau269c44b2010-11-14 15:20:06 +01001549static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001550{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001551 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001552 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301553 struct sk_buff *skb;
1554 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301555 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001556 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301557 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301558 int i, flags = 0;
1559 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301560 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301561
1562 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301563
Sujitha22be222009-03-30 15:28:36 +05301564 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301565 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301566 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301567 hdr = (struct ieee80211_hdr *)skb->data;
1568 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301569
Sujithc89424d2009-01-30 14:29:28 +05301570 /*
1571 * We check if Short Preamble is needed for the CTS rate by
1572 * checking the BSS's global flag.
1573 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1574 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001575 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1576 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301577 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001578 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001579
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001580 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001581 bool is_40, is_sgi, is_sp;
1582 int phy;
1583
Sujithe63835b2008-11-18 09:07:53 +05301584 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001585 continue;
1586
Sujitha8efee42008-11-18 09:07:30 +05301587 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301588 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001589
Felix Fietkau27032052010-01-17 21:08:50 +01001590 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1591 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301592 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001593 flags |= ATH9K_TXDESC_RTSENA;
1594 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1595 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1596 flags |= ATH9K_TXDESC_CTSENA;
1597 }
1598
Sujithc89424d2009-01-30 14:29:28 +05301599 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1600 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1601 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1602 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001603
Felix Fietkau545750d2009-11-23 22:21:01 +01001604 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1605 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1606 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1607
1608 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1609 /* MCS rates */
1610 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301611 series[i].ChSel = ath_txchainmask_reduction(sc,
1612 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001613 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001614 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001615 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1616 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001617 continue;
1618 }
1619
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301620 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001621 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1622 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1623 !(rate->flags & IEEE80211_RATE_ERP_G))
1624 phy = WLAN_RC_PHY_CCK;
1625 else
1626 phy = WLAN_RC_PHY_OFDM;
1627
1628 series[i].Rate = rate->hw_value;
1629 if (rate->hw_value_short) {
1630 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1631 series[i].Rate |= rate->hw_value_short;
1632 } else {
1633 is_sp = false;
1634 }
1635
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301636 if (bf->bf_state.bfs_paprd)
1637 series[i].ChSel = common->tx_chainmask;
1638 else
1639 series[i].ChSel = ath_txchainmask_reduction(sc,
1640 common->tx_chainmask, series[i].Rate);
1641
Felix Fietkau545750d2009-11-23 22:21:01 +01001642 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001643 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001644 }
1645
Felix Fietkau27032052010-01-17 21:08:50 +01001646 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001647 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001648 flags &= ~ATH9K_TXDESC_RTSENA;
1649
1650 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1651 if (flags & ATH9K_TXDESC_RTSENA)
1652 flags &= ~ATH9K_TXDESC_CTSENA;
1653
Sujithe63835b2008-11-18 09:07:53 +05301654 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301655 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1656 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301657 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301658 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301659
Sujith17d79042009-02-09 13:27:03 +05301660 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301661 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001662}
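/*
 * Sketch of what ath_buf_set_rate() produces (an assumed rate set, for
 * illustration only): if mac80211's rate control hands over
 * rates[] = { MCS7 x2, MCS4 x2, MCS1 x3, 6 Mb/s x4 }, the loop above
 * fills series[0..3] with Tries = 2/2/3/4 plus per-entry ChSel and
 * PktDuration, and ath9k_hw_set11n_ratescenario() programs that
 * multi-rate-retry schedule into the first descriptor of the frame.
 */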
1663
Felix Fietkau82b873a2010-11-11 03:18:37 +01001664static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
Felix Fietkau04caf862010-11-14 15:20:12 +01001665 struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001666 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301667{
Felix Fietkau9ac586152011-01-24 19:23:18 +01001668 struct ath_softc *sc = hw->priv;
Felix Fietkau04caf862010-11-14 15:20:12 +01001669 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001670 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001671 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001672 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001673 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001674 int frm_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001675
1676 bf = ath_tx_get_buffer(sc);
1677 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001678 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001679 return NULL;
1680 }
Sujithe8324352009-01-16 21:38:42 +05301681
Sujithe8324352009-01-16 21:38:42 +05301682 ATH_TXBUF_RESET(bf);
1683
Felix Fietkau82b873a2010-11-11 03:18:37 +01001684 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301685 bf->bf_mpdu = skb;
1686
Ben Greearc1739eb32010-10-14 12:45:29 -07001687 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1688 skb->len, DMA_TO_DEVICE);
1689 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301690 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001691 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001692 ath_err(ath9k_hw_common(sc->sc_ah),
1693 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001694 ath_tx_return_buffer(sc, bf);
1695 return NULL;
Sujithe8324352009-01-16 21:38:42 +05301696 }
1697
Sujithe8324352009-01-16 21:38:42 +05301698 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301699
1700 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001701 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301702
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001703 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1704 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301705
1706 ath9k_hw_filltxdesc(ah, ds,
1707 skb->len, /* segment length */
1708 true, /* first segment */
1709 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001710 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001711 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001712 txq->axq_qnum);
1713
1714
1715 return bf;
1716}
1717
1718/* FIXME: tx power */
1719static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1720 struct ath_tx_control *txctl)
1721{
1722 struct sk_buff *skb = bf->bf_mpdu;
1723 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1724 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau04caf862010-11-14 15:20:12 +01001725 struct list_head bf_head;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001726 struct ath_atx_tid *tid = NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001727 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301728
Sujithe8324352009-01-16 21:38:42 +05301729 spin_lock_bh(&txctl->txq->axq_lock);
1730
Felix Fietkau248a38d2010-12-10 21:16:46 +01001731 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001732 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1733 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001734 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001735
Felix Fietkau066dae92010-11-07 14:59:39 +01001736 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001737 }
1738
1739 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001740 /*
1741 * Try aggregation if it's a unicast data frame
1742 * and the destination is HT capable.
1743 */
1744 ath_tx_send_ampdu(sc, tid, bf, txctl);
Sujithe8324352009-01-16 21:38:42 +05301745 } else {
Felix Fietkau04caf862010-11-14 15:20:12 +01001746 INIT_LIST_HEAD(&bf_head);
1747 list_add_tail(&bf->list, &bf_head);
1748
Felix Fietkau61117f012010-11-11 03:18:36 +01001749 bf->bf_state.bfs_ftype = txctl->frame_type;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001750 bf->bf_state.bfs_paprd = txctl->paprd;
1751
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001752 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001753 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1754 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001755
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301756 if (txctl->paprd)
1757 bf->bf_state.bfs_paprd_timestamp = jiffies;
1758
Felix Fietkau248a38d2010-12-10 21:16:46 +01001759 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301760 }
1761
1762 spin_unlock_bh(&txctl->txq->axq_lock);
1763}
1764
1765/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001766int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301767 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001768{
Felix Fietkau28d16702010-11-14 15:20:10 +01001769 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1770 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001771 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001772 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001773 struct ath_txq *txq = txctl->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001774 struct ath_buf *bf;
Felix Fietkau28d16702010-11-14 15:20:10 +01001775 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001776 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001777 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001778
Ben Greeara9927ba2010-12-06 21:13:49 -08001779 /* NOTE: sta can be NULL according to net/mac80211.h */
1780 if (sta)
1781 txctl->an = (struct ath_node *)sta->drv_priv;
1782
Felix Fietkau04caf862010-11-14 15:20:12 +01001783 if (info->control.hw_key)
1784 frmlen += info->control.hw_key->icv_len;
1785
Felix Fietkau28d16702010-11-14 15:20:10 +01001786 /*
1787 * As a temporary workaround, assign seq# here; this will likely need
1788 * to be cleaned up to work better with Beacon transmission and virtual
1789 * BSSes.
1790 */
1791 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1792 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1793 sc->tx.seq_no += 0x10;
1794 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1795 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1796 }
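 /*
  * Note: seq_ctrl keeps the fragment number in its low 4 bits and the
  * sequence number in the upper 12 bits, which is why the counter above
  * advances by 0x10 (one sequence number) per first fragment while
  * IEEE80211_SCTL_FRAG preserves the fragment bits.  For example
  * (assumed value), sc->tx.seq_no == 0x0230 encodes sequence number 35.
  */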
1797
1798 /* Add the padding after the header if this is not already done */
1799 padpos = ath9k_cmn_padpos(hdr->frame_control);
1800 padsize = padpos & 3;
1801 if (padsize && skb->len > padpos) {
1802 if (skb_headroom(skb) < padsize)
1803 return -ENOMEM;
1804
1805 skb_push(skb, padsize);
1806 memmove(skb->data, skb->data + padsize, padpos);
1807 }
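 /*
  * Example of the padding above (typical values, for illustration
  * only): a 3-address QoS data frame has padpos = 26, so padsize = 2;
  * the skb_push()/memmove() pair slides the 802.11 header two bytes
  * forward, leaving a 2-byte gap after the header so the payload
  * starts on a 4-byte boundary for DMA.
  */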
1808
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001809 setup_frame_info(hw, skb, frmlen);
1810
1811 /*
1812 * At this point, the vif, hw_key and sta pointers in the tx control
1813 * info are no longer valid (overwritten by the ath_frame_info data).
1814 */
1815
1816 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
Felix Fietkau82b873a2010-11-11 03:18:37 +01001817 if (unlikely(!bf))
1818 return -ENOMEM;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001819
Felix Fietkau066dae92010-11-07 14:59:39 +01001820 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001821 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001822 if (txq == sc->tx.txq_map[q] &&
1823 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001824 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001825 txq->stopped = 1;
1826 }
1827 spin_unlock_bh(&txq->axq_lock);
1828
Sujithe8324352009-01-16 21:38:42 +05301829 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001830
1831 return 0;
1832}
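/*
 * Minimal caller sketch (assumed names, not the exact mac80211 tx hook
 * in main.c): the caller is expected to pick the target txq, invoke
 * ath_tx_start() and free the skb itself only when an error is returned.
 *
 *	struct ath_tx_control txctl = {
 *		.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)],
 *	};
 *
 *	if (ath_tx_start(hw, skb, &txctl) != 0)
 *		dev_kfree_skb_any(skb);
 */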
1833
Sujithe8324352009-01-16 21:38:42 +05301834/*****************/
1835/* TX Completion */
1836/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001837
Sujithe8324352009-01-16 21:38:42 +05301838static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001839 int tx_flags, int ftype, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001840{
Sujithe8324352009-01-16 21:38:42 +05301841 struct ieee80211_hw *hw = sc->hw;
1842 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001843 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001844 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001845 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301846
Joe Perches226afe62010-12-02 19:12:37 -08001847 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301848
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301849 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301850 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301851
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301852 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301853 /* Frame was ACKed */
1854 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1855 }
1856
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001857 padpos = ath9k_cmn_padpos(hdr->frame_control);
1858 padsize = padpos & 3;
1859 if (padsize && skb->len > padpos + padsize) {
Sujithe8324352009-01-16 21:38:42 +05301860 /*
1861 * Remove MAC header padding before giving the frame back to
1862 * mac80211.
1863 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001864 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301865 skb_pull(skb, padsize);
1866 }
1867
Sujith1b04b932010-01-08 10:36:05 +05301868 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1869 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001870 ath_dbg(common, ATH_DBG_PS,
1871 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301872 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1873 PS_WAIT_FOR_CAB |
1874 PS_WAIT_FOR_PSPOLL_DATA |
1875 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001876 }
1877
Felix Fietkau7545daf2011-01-24 19:23:16 +01001878 q = skb_get_queue_mapping(skb);
1879 if (txq == sc->tx.txq_map[q]) {
1880 spin_lock_bh(&txq->axq_lock);
1881 if (WARN_ON(--txq->pending_frames < 0))
1882 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001883
Felix Fietkau7545daf2011-01-24 19:23:16 +01001884 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1885 ieee80211_wake_queue(sc->hw, q);
1886 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001887 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001888 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001889 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001890
1891 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301892}
1893
1894static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001895 struct ath_txq *txq, struct list_head *bf_q,
1896 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05301897{
1898 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05301899 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301900 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05301901
Sujithe8324352009-01-16 21:38:42 +05301902 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301903 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05301904
1905 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301906 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05301907
1908 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301909 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05301910 }
1911
Ben Greearc1739eb32010-10-14 12:45:29 -07001912 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07001913 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001914
1915 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301916 if (time_after(jiffies,
1917 bf->bf_state.bfs_paprd_timestamp +
1918 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001919 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07001920 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07001921 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001922 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01001923 ath_debug_stat_tx(sc, bf, ts, txq);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001924 ath_tx_complete(sc, skb, tx_flags,
Felix Fietkau61117f012010-11-11 03:18:36 +01001925 bf->bf_state.bfs_ftype, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04001926 }
Ben Greear6cf9e992010-10-14 12:45:30 -07001927 /* At this point, skb (bf->bf_mpdu) is consumed; make sure we don't
1928 * accidentally reference it later.
1929 */
1930 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05301931
1932 /*
1933 * Return the list of ath_buf of this mpdu to free queue
1934 */
1935 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1936 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1937 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1938}
1939
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001940static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1941 struct ath_tx_status *ts, int nframes, int nbad,
1942 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05301943{
Sujitha22be222009-03-30 15:28:36 +05301944 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05301945 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05301946 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01001947 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001948 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301949 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05301950
Sujith95e4acb2009-03-13 08:56:09 +05301951 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001952 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05301953
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001954 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301955 WARN_ON(tx_rateindex >= hw->max_rates);
1956
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001957 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05301958 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02001959 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01001960 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05301961
Felix Fietkaub572d032010-11-14 15:20:07 +01001962 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02001963
Felix Fietkaub572d032010-11-14 15:20:07 +01001964 tx_info->status.ampdu_len = nframes;
1965 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02001966 }
1967
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001968 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301969 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01001970 /*
1971 * If an underrun error is seen assume it as an excessive
1972 * retry only if max frame trigger level has been reached
1973 * (2 KB for single stream, and 4 KB for dual stream).
1974 * Adjust the long retry as if the frame was tried
1975 * hw->max_rate_tries times to affect how rate control updates
1976 * PER for the failed rate.
1977 * In case of congestion on the bus, penalizing this type of
1978 * underrun should help the hardware actually transmit new frames
1979 * successfully by eventually preferring slower rates.
1980 * This itself should also alleviate congestion on the bus.
1981 */
1982 if (ieee80211_is_data(hdr->frame_control) &&
1983 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1984 ATH9K_TX_DELIM_UNDERRUN)) &&
1985 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1986 tx_info->status.rates[tx_rateindex].count =
1987 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05301988 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301989
Felix Fietkau545750d2009-11-23 22:21:01 +01001990 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301991 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01001992 tx_info->status.rates[i].idx = -1;
1993 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05301994
Felix Fietkau78c46532010-06-25 01:26:16 +02001995 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05301996}
1997
Sujithc4288392008-11-18 09:09:30 +05301998static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001999{
Sujithcbe61d82009-02-09 13:27:12 +05302000 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002001 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002002 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2003 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302004 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002005 struct ath_tx_status ts;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +05302006 int txok;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002007 int status;
2008
Joe Perches226afe62010-12-02 19:12:37 -08002009 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2010 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2011 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002012
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002013 for (;;) {
2014 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002015 if (list_empty(&txq->axq_q)) {
2016 txq->axq_link = NULL;
Vasanthakumar Thiagarajan69081622011-02-19 01:13:42 -08002017 if (sc->sc_flags & SC_OP_TXAGGR &&
2018 !txq->txq_flush_inprogress)
Ben Greear082f6532011-01-09 23:11:47 -08002019 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002020 spin_unlock_bh(&txq->axq_lock);
2021 break;
2022 }
2023 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2024
2025 /*
2026 * There is a race condition that a BH gets scheduled
2027 * after sw writes TxE and before hw re-loads the last
2028 * descriptor to get the newly chained one.
2029 * Software must keep the last DONE descriptor as a
2030 * holding descriptor - software does so by marking
2031 * it with the STALE flag.
2032 */
2033 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302034 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002035 bf_held = bf;
2036 if (list_is_last(&bf_held->list, &txq->axq_q)) {
Sujith6ef9b132009-01-16 21:38:51 +05302037 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002038 break;
2039 } else {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002040 bf = list_entry(bf_held->list.next,
Sujith6ef9b132009-01-16 21:38:51 +05302041 struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002042 }
2043 }
2044
2045 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302046 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002047
Felix Fietkau29bffa92010-03-29 20:14:23 -07002048 memset(&ts, 0, sizeof(ts));
2049 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002050 if (status == -EINPROGRESS) {
2051 spin_unlock_bh(&txq->axq_lock);
2052 break;
2053 }
Ben Greear2dac4fb2011-01-09 23:11:45 -08002054 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002055
2056 /*
2057 * Remove ath_buf's of the same transmit unit from txq,
2058 * however leave the last descriptor back as the holding
2059 * descriptor for hw.
2060 */
Sujitha119cc42009-03-30 15:28:38 +05302061 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002062 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002063 if (!list_is_singular(&lastbf->list))
2064 list_cut_position(&bf_head,
2065 &txq->axq_q, lastbf->list.prev);
2066
2067 txq->axq_depth--;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002068 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002069 txq->axq_tx_inprogress = false;
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002070 if (bf_held)
2071 list_del(&bf_held->list);
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002072
2073 if (bf_is_ampdu_not_probing(bf))
2074 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajan69081622011-02-19 01:13:42 -08002075
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002076 spin_unlock_bh(&txq->axq_lock);
2077
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002078 if (bf_held)
2079 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002080
Sujithcd3d39a2008-08-11 14:03:34 +05302081 if (!bf_isampdu(bf)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002082 /*
2083 * This frame is sent out as a single frame.
2084 * Use hardware retry status for this frame.
2085 */
Felix Fietkau29bffa92010-03-29 20:14:23 -07002086 if (ts.ts_status & ATH9K_TXERR_XRETRY)
Sujithcd3d39a2008-08-11 14:03:34 +05302087 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002088 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002089 }
Johannes Berge6a98542008-10-21 12:40:02 +02002090
Sujithcd3d39a2008-08-11 14:03:34 +05302091 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002092 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2093 true);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002094 else
Felix Fietkau29bffa92010-03-29 20:14:23 -07002095 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002096
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002097 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002098
Vasanthakumar Thiagarajan69081622011-02-19 01:13:42 -08002099 if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002100 ath_txq_schedule(sc, txq);
2101 spin_unlock_bh(&txq->axq_lock);
2102 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002103}
2104
Vivek Natarajan181fb182011-01-27 14:45:08 +05302105static void ath_hw_pll_work(struct work_struct *work)
2106{
2107 struct ath_softc *sc = container_of(work, struct ath_softc,
2108 hw_pll_work.work);
2109 static int count;
2110
2111 if (AR_SREV_9485(sc->sc_ah)) {
2112 if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
2113 count++;
2114
2115 if (count == 3) {
2116 /* Rx is hung for more than 500ms. Reset it */
2117 ath_reset(sc, true);
2118 count = 0;
2119 }
2120 } else
2121 count = 0;
2122
2123 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
2124 }
2125}
2126
Sujith305fe472009-07-23 15:32:29 +05302127static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002128{
2129 struct ath_softc *sc = container_of(work, struct ath_softc,
2130 tx_complete_work.work);
2131 struct ath_txq *txq;
2132 int i;
2133 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002134#ifdef CONFIG_ATH9K_DEBUGFS
2135 sc->tx_complete_poll_work_seen++;
2136#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002137
2138 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2139 if (ATH_TXQ_SETUP(sc, i)) {
2140 txq = &sc->tx.txq[i];
2141 spin_lock_bh(&txq->axq_lock);
2142 if (txq->axq_depth) {
2143 if (txq->axq_tx_inprogress) {
2144 needreset = true;
2145 spin_unlock_bh(&txq->axq_lock);
2146 break;
2147 } else {
2148 txq->axq_tx_inprogress = true;
2149 }
Ben Greear60f2d1d2011-01-09 23:11:52 -08002150 } else {
2151 /* If the queue has pending buffers, then it
2152 * should be doing tx work (and have axq_depth).
2153 * We shouldn't get to this state, but
2154 * we do.
2155 */
2156 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2157 (txq->pending_frames > 0 ||
2158 !list_empty(&txq->axq_acq) ||
2159 txq->stopped)) {
2160 ath_err(ath9k_hw_common(sc->sc_ah),
2161 "txq: %p axq_qnum: %u,"
2162 " mac80211_qnum: %i"
2163 " axq_link: %p"
2164 " pending frames: %i"
2165 " axq_acq empty: %i"
2166 " stopped: %i"
2167 " axq_depth: 0 Attempting to"
2168 " restart tx logic.\n",
2169 txq, txq->axq_qnum,
2170 txq->mac80211_qnum,
2171 txq->axq_link,
2172 txq->pending_frames,
2173 list_empty(&txq->axq_acq),
2174 txq->stopped);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002175 ath_txq_schedule(sc, txq);
2176 }
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002177 }
2178 spin_unlock_bh(&txq->axq_lock);
2179 }
2180
2181 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002182 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2183 "tx hung, resetting the chip\n");
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002184 ath_reset(sc, true);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002185 }
2186
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002187 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002188 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2189}
2190
2191
Sujithe8324352009-01-16 21:38:42 +05302192
2193void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002194{
Sujithe8324352009-01-16 21:38:42 +05302195 int i;
2196 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002197
Sujithe8324352009-01-16 21:38:42 +05302198 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002199
2200 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302201 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2202 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002203 }
2204}
2205
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002206void ath_tx_edma_tasklet(struct ath_softc *sc)
2207{
2208 struct ath_tx_status txs;
2209 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2210 struct ath_hw *ah = sc->sc_ah;
2211 struct ath_txq *txq;
2212 struct ath_buf *bf, *lastbf;
2213 struct list_head bf_head;
2214 int status;
2215 int txok;
2216
2217 for (;;) {
2218 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2219 if (status == -EINPROGRESS)
2220 break;
2221 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002222 ath_dbg(common, ATH_DBG_XMIT,
2223 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002224 break;
2225 }
2226
2227 /* Skip beacon completions */
2228 if (txs.qid == sc->beacon.beaconq)
2229 continue;
2230
2231 txq = &sc->tx.txq[txs.qid];
2232
2233 spin_lock_bh(&txq->axq_lock);
2234 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2235 spin_unlock_bh(&txq->axq_lock);
2236 return;
2237 }
2238
2239 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2240 struct ath_buf, list);
2241 lastbf = bf->bf_lastbf;
2242
2243 INIT_LIST_HEAD(&bf_head);
2244 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2245 &lastbf->list);
2246 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2247 txq->axq_depth--;
2248 txq->axq_tx_inprogress = false;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01002249 if (bf_is_ampdu_not_probing(bf))
2250 txq->axq_ampdu_depth--;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002251 spin_unlock_bh(&txq->axq_lock);
2252
2253 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2254
2255 if (!bf_isampdu(bf)) {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002256 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2257 bf->bf_state.bf_type |= BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002258 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002259 }
2260
2261 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01002262 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2263 txok, true);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002264 else
2265 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2266 &txs, txok, 0);
2267
2268 spin_lock_bh(&txq->axq_lock);
Ben Greear60f2d1d2011-01-09 23:11:52 -08002269
Vasanthakumar Thiagarajan69081622011-02-19 01:13:42 -08002270 if (!txq->txq_flush_inprogress) {
2271 if (!list_empty(&txq->txq_fifo_pending)) {
2272 INIT_LIST_HEAD(&bf_head);
2273 bf = list_first_entry(&txq->txq_fifo_pending,
2274 struct ath_buf, list);
2275 list_cut_position(&bf_head,
2276 &txq->txq_fifo_pending,
2277 &bf->bf_lastbf->list);
2278 ath_tx_txqaddbuf(sc, txq, &bf_head);
2279 } else if (sc->sc_flags & SC_OP_TXAGGR)
2280 ath_txq_schedule(sc, txq);
2281 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002282 spin_unlock_bh(&txq->axq_lock);
2283 }
2284}
2285
Sujithe8324352009-01-16 21:38:42 +05302286/*****************/
2287/* Init, Cleanup */
2288/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002289
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002290static int ath_txstatus_setup(struct ath_softc *sc, int size)
2291{
2292 struct ath_descdma *dd = &sc->txsdma;
2293 u8 txs_len = sc->sc_ah->caps.txs_len;
2294
2295 dd->dd_desc_len = size * txs_len;
2296 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2297 &dd->dd_desc_paddr, GFP_KERNEL);
2298 if (!dd->dd_desc)
2299 return -ENOMEM;
2300
2301 return 0;
2302}
2303
2304static int ath_tx_edma_init(struct ath_softc *sc)
2305{
2306 int err;
2307
2308 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2309 if (!err)
2310 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2311 sc->txsdma.dd_desc_paddr,
2312 ATH_TXSTATUS_RING_SIZE);
2313
2314 return err;
2315}
2316
2317static void ath_tx_edma_cleanup(struct ath_softc *sc)
2318{
2319 struct ath_descdma *dd = &sc->txsdma;
2320
2321 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2322 dd->dd_desc_paddr);
2323}
2324
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002325int ath_tx_init(struct ath_softc *sc, int nbufs)
2326{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002327 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002328 int error = 0;
2329
Sujith797fe5cb2009-03-30 15:28:45 +05302330 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002331
Sujith797fe5cb2009-03-30 15:28:45 +05302332 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002333 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302334 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002335 ath_err(common,
2336 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302337 goto err;
2338 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002339
Sujith797fe5cb2009-03-30 15:28:45 +05302340 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002341 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302342 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002343 ath_err(common,
2344 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302345 goto err;
2346 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002347
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002348 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
Vivek Natarajan181fb182011-01-27 14:45:08 +05302349 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002350
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002351 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2352 error = ath_tx_edma_init(sc);
2353 if (error)
2354 goto err;
2355 }
2356
Sujith797fe5cb2009-03-30 15:28:45 +05302357err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002358 if (error != 0)
2359 ath_tx_cleanup(sc);
2360
2361 return error;
2362}
2363
Sujith797fe5cb2009-03-30 15:28:45 +05302364void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002365{
Sujithb77f4832008-12-07 21:44:03 +05302366 if (sc->beacon.bdma.dd_desc_len != 0)
2367 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002368
Sujithb77f4832008-12-07 21:44:03 +05302369 if (sc->tx.txdma.dd_desc_len != 0)
2370 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002371
2372 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2373 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002374}
2375
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002376void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2377{
Sujithc5170162008-10-29 10:13:59 +05302378 struct ath_atx_tid *tid;
2379 struct ath_atx_ac *ac;
2380 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002381
Sujith8ee5afb2008-12-07 21:43:36 +05302382 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302383 tidno < WME_NUM_TID;
2384 tidno++, tid++) {
2385 tid->an = an;
2386 tid->tidno = tidno;
2387 tid->seq_start = tid->seq_next = 0;
2388 tid->baw_size = WME_MAX_BA;
2389 tid->baw_head = tid->baw_tail = 0;
2390 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302391 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302392 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302393 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302394 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302395 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302396 tid->state &= ~AGGR_ADDBA_COMPLETE;
2397 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302398 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002399
Sujith8ee5afb2008-12-07 21:43:36 +05302400 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302401 acno < WME_NUM_AC; acno++, ac++) {
2402 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002403 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302404 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002405 }
2406}
2407
Sujithb5aa9bf2008-10-29 10:13:31 +05302408void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002409{
Felix Fietkau2b409942010-07-07 19:42:08 +02002410 struct ath_atx_ac *ac;
2411 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002412 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002413 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302414
Felix Fietkau2b409942010-07-07 19:42:08 +02002415 for (tidno = 0, tid = &an->tid[tidno];
2416 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417
Felix Fietkau2b409942010-07-07 19:42:08 +02002418 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002419 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002420
Felix Fietkau2b409942010-07-07 19:42:08 +02002421 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002422
Felix Fietkau2b409942010-07-07 19:42:08 +02002423 if (tid->sched) {
2424 list_del(&tid->list);
2425 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002426 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002427
2428 if (ac->sched) {
2429 list_del(&ac->list);
2430 tid->ac->sched = false;
2431 }
2432
2433 ath_tid_drain(sc, txq, tid);
2434 tid->state &= ~AGGR_ADDBA_COMPLETE;
2435 tid->state &= ~AGGR_CLEANUP;
2436
2437 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002438 }
2439}