blob: 65d46c6ebced44245d2071dca7b400f71b4fa70e [file] [log] [blame]
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001/*
Sujithcee075a2009-03-13 09:07:23 +05302 * Copyright (c) 2008-2009 Atheros Communications Inc.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
Sujith394cf0a2009-02-09 13:26:54 +053017#include "ath9k.h"
Luis R. Rodriguezb622a722010-04-15 17:39:28 -040018#include "ar9003_mac.h"
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070019
20#define BITS_PER_BYTE 8
21#define OFDM_PLCP_BITS 22
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070022#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
23#define L_STF 8
24#define L_LTF 8
25#define L_SIG 4
26#define HT_SIG 8
27#define HT_STF 4
28#define HT_LTF(_ns) (4 * (_ns))
29#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
30#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
31#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
32#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
33
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070034
/*
 * Data bits carried per OFDM symbol for one spatial stream,
 * indexed by [mcs % 8][40MHz?]. Used by ath_compute_num_delims()
 * to convert an mpdu-density time budget into a byte count.
 */
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
46
47#define IS_HT_RATE(_rate) ((_rate) & 0x80)
48
Felix Fietkau82b873a2010-11-11 03:18:37 +010049static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
50 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +010051 struct list_head *bf_head);
Sujithe8324352009-01-16 21:38:42 +053052static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -070053 struct ath_txq *txq, struct list_head *bf_q,
54 struct ath_tx_status *ts, int txok, int sendbar);
Sujithe8324352009-01-16 21:38:42 +053055static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
56 struct list_head *head);
Felix Fietkau269c44b2010-11-14 15:20:06 +010057static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +010058static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
59 struct ath_tx_status *ts, int nframes, int nbad,
60 int txok, bool update_rc);
Felix Fietkau90fa5392010-09-20 13:45:38 +020061static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
62 int seqno);
Sujithe8324352009-01-16 21:38:42 +053063
/* Row indices for the per-mode 4ms frame length lookup table. */
enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};
70
/*
 * Largest frame length (bytes, capped at 65532) that fits in a 4ms
 * transmit duration, indexed by [HT mode][MCS index]. Consulted by
 * ath_lookup_rate() to bound aggregate size.
 */
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864,  19300,  25736,  28952,  32172,
		6424, 12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628, 19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720,  14296,  21444,  28596,  32172,  35744,
		7140, 14284, 21428,  28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112,  42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780,  57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360,  20044,  26724,  40092,  53456,  60140,  65532,
		13348,  26700,  40052,  53400,  65532,  65532,  65532,  65532,
		20004,  40008,  60016,  65532,  65532,  65532,  65532,  65532,
		26644,  53292,  65532,  65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844,  22272,  29696,  44544,  59396,  65532,  65532,
		14832,  29668,  44504,  59340,  65532,  65532,  65532,  65532,
		22232,  44464,  65532,  65532,  65532,  65532,  65532,  65532,
		29616,  59232,  65532,  65532,  65532,  65532,  65532,  65532,
	}
};
97
Sujithe8324352009-01-16 21:38:42 +053098/*********************/
99/* Aggregation logic */
100/*********************/
101
Sujithe8324352009-01-16 21:38:42 +0530102static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
103{
104 struct ath_atx_ac *ac = tid->ac;
105
106 if (tid->paused)
107 return;
108
109 if (tid->sched)
110 return;
111
112 tid->sched = true;
113 list_add_tail(&tid->list, &ac->tid_q);
114
115 if (ac->sched)
116 return;
117
118 ac->sched = true;
119 list_add_tail(&ac->list, &txq->axq_acq);
120}
121
Sujithe8324352009-01-16 21:38:42 +0530122static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
123{
Felix Fietkau066dae92010-11-07 14:59:39 +0100124 struct ath_txq *txq = tid->ac->txq;
Sujithe8324352009-01-16 21:38:42 +0530125
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200126 WARN_ON(!tid->paused);
127
Sujithe8324352009-01-16 21:38:42 +0530128 spin_lock_bh(&txq->axq_lock);
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200129 tid->paused = false;
Sujithe8324352009-01-16 21:38:42 +0530130
131 if (list_empty(&tid->buf_q))
132 goto unlock;
133
134 ath_tx_queue_tid(txq, tid);
135 ath_txq_schedule(sc, txq);
136unlock:
137 spin_unlock_bh(&txq->axq_lock);
138}
139
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100140static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
Felix Fietkau76e45222010-11-14 15:20:08 +0100141{
142 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100143 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
144 sizeof(tx_info->rate_driver_data));
145 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
Felix Fietkau76e45222010-11-14 15:20:08 +0100146}
147
/*
 * Drain all frames pending on a TID's software queue, completing each
 * one with a failed status. Frames that were already retried are
 * removed from the block-ack window; untouched frames are sent out as
 * normal (non-aggregate) frames.
 *
 * NOTE: the lock is dropped around the completion calls because
 * ath_tx_complete_buf()/ath_tx_send_normal() must not be called with
 * axq_lock held; the list head is re-checked after reacquiring.
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	/* Synthesized (all-zero) tx status for the failed completions. */
	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		spin_unlock_bh(&txq->axq_lock);
		fi = get_frame_info(bf->bf_mpdu);
		if (fi->retries) {
			/* Already in the BAW: release its slot, send BAR. */
			ath_tx_update_baw(sc, tid, fi->seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			/* Never transmitted: push it out as a normal frame. */
			ath_tx_send_normal(sc, txq, NULL, &bf_head);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}
178
179static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
180 int seqno)
181{
182 int index, cindex;
183
184 index = ATH_BA_INDEX(tid->seq_start, seqno);
185 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
186
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200187 __clear_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530188
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200189 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
Sujithe8324352009-01-16 21:38:42 +0530190 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
191 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
192 }
193}
194
195static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100196 u16 seqno)
Sujithe8324352009-01-16 21:38:42 +0530197{
198 int index, cindex;
199
Felix Fietkau2d3bcba2010-11-14 15:20:01 +0100200 index = ATH_BA_INDEX(tid->seq_start, seqno);
Sujithe8324352009-01-16 21:38:42 +0530201 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
Felix Fietkau81ee13b2010-09-20 13:45:36 +0200202 __set_bit(cindex, tid->tx_buf);
Sujithe8324352009-01-16 21:38:42 +0530203
204 if (index >= ((tid->baw_tail - tid->baw_head) &
205 (ATH_TID_MAX_BUFS - 1))) {
206 tid->baw_tail = cindex;
207 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
208 }
209}
210
211/*
212 * TODO: For frame(s) that are in the retry state, we will reuse the
213 * sequence number(s) without setting the retry bit. The
214 * alternative is to give up on these and BAR the receiver's window
215 * forward.
216 */
217static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
218 struct ath_atx_tid *tid)
219
220{
221 struct ath_buf *bf;
222 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700223 struct ath_tx_status ts;
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100224 struct ath_frame_info *fi;
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700225
226 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +0530227 INIT_LIST_HEAD(&bf_head);
228
229 for (;;) {
230 if (list_empty(&tid->buf_q))
231 break;
Sujithe8324352009-01-16 21:38:42 +0530232
Sujithd43f30152009-01-16 21:38:53 +0530233 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
234 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530235
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100236 fi = get_frame_info(bf->bf_mpdu);
237 if (fi->retries)
238 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530239
240 spin_unlock(&txq->axq_lock);
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700241 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Sujithe8324352009-01-16 21:38:42 +0530242 spin_lock(&txq->axq_lock);
243 }
244
245 tid->seq_next = tid->seq_start;
246 tid->baw_tail = tid->baw_head;
247}
248
Sujithfec247c2009-07-27 12:08:16 +0530249static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100250 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +0530251{
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100252 struct ath_frame_info *fi = get_frame_info(skb);
Sujithe8324352009-01-16 21:38:42 +0530253 struct ieee80211_hdr *hdr;
254
Sujithfec247c2009-07-27 12:08:16 +0530255 TX_STAT_INC(txq->axq_qnum, a_retries);
Felix Fietkau8b7f8532010-11-28 19:37:48 +0100256 if (fi->retries++ > 0)
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100257 return;
Sujithe8324352009-01-16 21:38:42 +0530258
Sujithe8324352009-01-16 21:38:42 +0530259 hdr = (struct ieee80211_hdr *)skb->data;
260 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
261}
262
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200263static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
264{
265 struct ath_buf *bf = NULL;
266
267 spin_lock_bh(&sc->tx.txbuflock);
268
269 if (unlikely(list_empty(&sc->tx.txbuf))) {
270 spin_unlock_bh(&sc->tx.txbuflock);
271 return NULL;
272 }
273
274 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
275 list_del(&bf->list);
276
277 spin_unlock_bh(&sc->tx.txbuflock);
278
279 return bf;
280}
281
282static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
283{
284 spin_lock_bh(&sc->tx.txbuflock);
285 list_add_tail(&bf->list, &sc->tx.txbuf);
286 spin_unlock_bh(&sc->tx.txbuflock);
287}
288
Sujithd43f30152009-01-16 21:38:53 +0530289static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
290{
291 struct ath_buf *tbf;
292
Felix Fietkau0a8cea82010-04-19 19:57:30 +0200293 tbf = ath_tx_get_buffer(sc);
294 if (WARN_ON(!tbf))
Vasanthakumar Thiagarajan8a460972009-06-10 17:50:09 +0530295 return NULL;
Sujithd43f30152009-01-16 21:38:53 +0530296
297 ATH_TXBUF_RESET(tbf);
298
299 tbf->bf_mpdu = bf->bf_mpdu;
300 tbf->bf_buf_addr = bf->bf_buf_addr;
Vasanthakumar Thiagarajand826c832010-04-15 17:38:45 -0400301 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
Sujithd43f30152009-01-16 21:38:53 +0530302 tbf->bf_state = bf->bf_state;
Sujithd43f30152009-01-16 21:38:53 +0530303
304 return tbf;
305}
306
Felix Fietkaub572d032010-11-14 15:20:07 +0100307static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
308 struct ath_tx_status *ts, int txok,
309 int *nframes, int *nbad)
310{
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100311 struct ath_frame_info *fi;
Felix Fietkaub572d032010-11-14 15:20:07 +0100312 u16 seq_st = 0;
313 u32 ba[WME_BA_BMP_SIZE >> 5];
314 int ba_index;
315 int isaggr = 0;
316
317 *nbad = 0;
318 *nframes = 0;
319
Felix Fietkaub572d032010-11-14 15:20:07 +0100320 isaggr = bf_isaggr(bf);
321 if (isaggr) {
322 seq_st = ts->ts_seqnum;
323 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
324 }
325
326 while (bf) {
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100327 fi = get_frame_info(bf->bf_mpdu);
328 ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
Felix Fietkaub572d032010-11-14 15:20:07 +0100329
330 (*nframes)++;
331 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
332 (*nbad)++;
333
334 bf = bf->bf_next;
335 }
336}
337
338
Sujithd43f30152009-01-16 21:38:53 +0530339static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
340 struct ath_buf *bf, struct list_head *bf_q,
Felix Fietkauc5992612010-11-14 15:20:09 +0100341 struct ath_tx_status *ts, int txok, bool retry)
Sujithe8324352009-01-16 21:38:42 +0530342{
343 struct ath_node *an = NULL;
344 struct sk_buff *skb;
Sujith1286ec62009-01-27 13:30:37 +0530345 struct ieee80211_sta *sta;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100346 struct ieee80211_hw *hw = sc->hw;
Sujith1286ec62009-01-27 13:30:37 +0530347 struct ieee80211_hdr *hdr;
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800348 struct ieee80211_tx_info *tx_info;
Sujithe8324352009-01-16 21:38:42 +0530349 struct ath_atx_tid *tid = NULL;
Sujithd43f30152009-01-16 21:38:53 +0530350 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +0530351 struct list_head bf_head, bf_pending;
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530352 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
Sujithe8324352009-01-16 21:38:42 +0530353 u32 ba[WME_BA_BMP_SIZE >> 5];
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530354 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
355 bool rc_update = true;
Felix Fietkau78c46532010-06-25 01:26:16 +0200356 struct ieee80211_tx_rate rates[4];
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100357 struct ath_frame_info *fi;
Björn Smedmanebd02282010-10-10 22:44:39 +0200358 int nframes;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100359 u8 tidno;
Felix Fietkau55195412011-04-17 23:28:09 +0200360 bool clear_filter;
Sujithe8324352009-01-16 21:38:42 +0530361
Sujitha22be222009-03-30 15:28:36 +0530362 skb = bf->bf_mpdu;
Sujith1286ec62009-01-27 13:30:37 +0530363 hdr = (struct ieee80211_hdr *)skb->data;
Sujithe8324352009-01-16 21:38:42 +0530364
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800365 tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguez76d5a9e2009-11-02 16:08:34 -0800366
Felix Fietkau78c46532010-06-25 01:26:16 +0200367 memcpy(rates, tx_info->control.rates, sizeof(rates));
368
Sujith1286ec62009-01-27 13:30:37 +0530369 rcu_read_lock();
370
Ben Greear686b9cb2010-09-23 09:44:36 -0700371 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
Sujith1286ec62009-01-27 13:30:37 +0530372 if (!sta) {
373 rcu_read_unlock();
Felix Fietkau73e19462010-07-07 19:42:09 +0200374
Felix Fietkau31e79a52010-07-12 23:16:34 +0200375 INIT_LIST_HEAD(&bf_head);
376 while (bf) {
377 bf_next = bf->bf_next;
378
379 bf->bf_state.bf_type |= BUF_XRETRY;
380 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
381 !bf->bf_stale || bf_next != NULL)
382 list_move_tail(&bf->list, &bf_head);
383
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100384 ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
Felix Fietkau31e79a52010-07-12 23:16:34 +0200385 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
386 0, 0);
387
388 bf = bf_next;
389 }
Sujith1286ec62009-01-27 13:30:37 +0530390 return;
Sujithe8324352009-01-16 21:38:42 +0530391 }
392
Sujith1286ec62009-01-27 13:30:37 +0530393 an = (struct ath_node *)sta->drv_priv;
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100394 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
395 tid = ATH_AN_2_TID(an, tidno);
Sujith1286ec62009-01-27 13:30:37 +0530396
Felix Fietkaub11b1602010-07-11 12:48:44 +0200397 /*
398 * The hardware occasionally sends a tx status for the wrong TID.
399 * In this case, the BA status cannot be considered valid and all
400 * subframes need to be retransmitted
401 */
Felix Fietkau5daefbd2010-11-14 15:20:02 +0100402 if (tidno != ts->tid)
Felix Fietkaub11b1602010-07-11 12:48:44 +0200403 txok = false;
404
Sujithe8324352009-01-16 21:38:42 +0530405 isaggr = bf_isaggr(bf);
Sujithd43f30152009-01-16 21:38:53 +0530406 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530407
Sujithd43f30152009-01-16 21:38:53 +0530408 if (isaggr && txok) {
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700409 if (ts->ts_flags & ATH9K_TX_BA) {
410 seq_st = ts->ts_seqnum;
411 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
Sujithe8324352009-01-16 21:38:42 +0530412 } else {
Sujithd43f30152009-01-16 21:38:53 +0530413 /*
414 * AR5416 can become deaf/mute when BA
415 * issue happens. Chip needs to be reset.
416 * But AP code may have sychronization issues
417 * when perform internal reset in this routine.
418 * Only enable reset in STA mode for now.
419 */
Sujith2660b812009-02-09 13:27:26 +0530420 if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
Sujithd43f30152009-01-16 21:38:53 +0530421 needreset = 1;
Sujithe8324352009-01-16 21:38:42 +0530422 }
423 }
424
425 INIT_LIST_HEAD(&bf_pending);
426 INIT_LIST_HEAD(&bf_head);
427
Felix Fietkaub572d032010-11-14 15:20:07 +0100428 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
Sujithe8324352009-01-16 21:38:42 +0530429 while (bf) {
Felix Fietkauf0b82202011-01-15 14:30:15 +0100430 txfail = txpending = sendbar = 0;
Sujithe8324352009-01-16 21:38:42 +0530431 bf_next = bf->bf_next;
432
Felix Fietkau78c46532010-06-25 01:26:16 +0200433 skb = bf->bf_mpdu;
434 tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100435 fi = get_frame_info(skb);
Felix Fietkau78c46532010-06-25 01:26:16 +0200436
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100437 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
Sujithe8324352009-01-16 21:38:42 +0530438 /* transmit completion, subframe is
439 * acked by block ack */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530440 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530441 } else if (!isaggr && txok) {
442 /* transmit completion */
Vasanthakumar Thiagarajan0934af22009-03-18 20:22:00 +0530443 acked_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530444 } else {
Felix Fietkau55195412011-04-17 23:28:09 +0200445 if ((tid->state & AGGR_CLEANUP) || !retry) {
Sujithe8324352009-01-16 21:38:42 +0530446 /*
447 * cleanup in progress, just fail
448 * the un-acked sub-frames
449 */
450 txfail = 1;
Felix Fietkau55195412011-04-17 23:28:09 +0200451 } else if (fi->retries < ATH_MAX_SW_RETRIES) {
452 if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
453 !an->sleeping)
454 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
455
456 clear_filter = true;
457 txpending = 1;
458 } else {
459 bf->bf_state.bf_type |= BUF_XRETRY;
460 txfail = 1;
461 sendbar = 1;
462 txfail_cnt++;
Sujithe8324352009-01-16 21:38:42 +0530463 }
464 }
465
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400466 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
467 bf_next == NULL) {
Vasanthakumar Thiagarajancbfe89c2009-06-24 18:58:47 +0530468 /*
469 * Make sure the last desc is reclaimed if it
470 * not a holding desc.
471 */
472 if (!bf_last->bf_stale)
473 list_move_tail(&bf->list, &bf_head);
474 else
475 INIT_LIST_HEAD(&bf_head);
Sujithe8324352009-01-16 21:38:42 +0530476 } else {
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -0700477 BUG_ON(list_empty(bf_q));
Sujithd43f30152009-01-16 21:38:53 +0530478 list_move_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +0530479 }
480
Felix Fietkau90fa5392010-09-20 13:45:38 +0200481 if (!txpending || (tid->state & AGGR_CLEANUP)) {
Sujithe8324352009-01-16 21:38:42 +0530482 /*
483 * complete the acked-ones/xretried ones; update
484 * block-ack window
485 */
486 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100487 ath_tx_update_baw(sc, tid, fi->seqno);
Sujithe8324352009-01-16 21:38:42 +0530488 spin_unlock_bh(&txq->axq_lock);
489
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530490 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
Felix Fietkau78c46532010-06-25 01:26:16 +0200491 memcpy(tx_info->control.rates, rates, sizeof(rates));
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100492 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530493 rc_update = false;
494 } else {
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100495 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +0530496 }
497
Felix Fietkaudb1a0522010-03-29 20:07:11 -0700498 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
499 !txfail, sendbar);
Sujithe8324352009-01-16 21:38:42 +0530500 } else {
Sujithd43f30152009-01-16 21:38:53 +0530501 /* retry the un-acked ones */
Felix Fietkau55195412011-04-17 23:28:09 +0200502 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400503 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
504 if (bf->bf_next == NULL && bf_last->bf_stale) {
505 struct ath_buf *tbf;
Sujithe8324352009-01-16 21:38:42 +0530506
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400507 tbf = ath_clone_txbuf(sc, bf_last);
508 /*
509 * Update tx baw and complete the
510 * frame with failed status if we
511 * run out of tx buf.
512 */
513 if (!tbf) {
514 spin_lock_bh(&txq->axq_lock);
Felix Fietkau2d42efc2010-11-14 15:20:13 +0100515 ath_tx_update_baw(sc, tid, fi->seqno);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400516 spin_unlock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400517
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400518 bf->bf_state.bf_type |=
519 BUF_XRETRY;
Felix Fietkau0cdd5c62011-01-24 19:23:17 +0100520 ath_tx_rc_status(sc, bf, ts, nframes,
Felix Fietkaub572d032010-11-14 15:20:07 +0100521 nbad, 0, false);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -0400522 ath_tx_complete_buf(sc, bf, txq,
523 &bf_head,
524 ts, 0, 0);
525 break;
526 }
527
528 ath9k_hw_cleartxdesc(sc->sc_ah,
529 tbf->bf_desc);
530 list_add_tail(&tbf->list, &bf_head);
531 } else {
532 /*
533 * Clear descriptor status words for
534 * software retry
535 */
536 ath9k_hw_cleartxdesc(sc->sc_ah,
537 bf->bf_desc);
Vasanthakumar Thiagarajanc41d92d2009-07-14 20:17:11 -0400538 }
Sujithe8324352009-01-16 21:38:42 +0530539 }
540
541 /*
542 * Put this buffer to the temporary pending
543 * queue to retain ordering
544 */
545 list_splice_tail_init(&bf_head, &bf_pending);
546 }
547
548 bf = bf_next;
549 }
550
Felix Fietkau4cee7862010-07-23 03:53:16 +0200551 /* prepend un-acked frames to the beginning of the pending frame queue */
552 if (!list_empty(&bf_pending)) {
Felix Fietkau55195412011-04-17 23:28:09 +0200553 if (an->sleeping)
554 ieee80211_sta_set_tim(sta);
555
Felix Fietkau4cee7862010-07-23 03:53:16 +0200556 spin_lock_bh(&txq->axq_lock);
Felix Fietkau55195412011-04-17 23:28:09 +0200557 if (clear_filter)
558 tid->ac->clear_ps_filter = true;
Felix Fietkau4cee7862010-07-23 03:53:16 +0200559 list_splice(&bf_pending, &tid->buf_q);
560 ath_tx_queue_tid(txq, tid);
561 spin_unlock_bh(&txq->axq_lock);
562 }
563
Sujithe8324352009-01-16 21:38:42 +0530564 if (tid->state & AGGR_CLEANUP) {
Felix Fietkau90fa5392010-09-20 13:45:38 +0200565 ath_tx_flush_tid(sc, tid);
566
Sujithe8324352009-01-16 21:38:42 +0530567 if (tid->baw_head == tid->baw_tail) {
568 tid->state &= ~AGGR_ADDBA_COMPLETE;
Sujithe8324352009-01-16 21:38:42 +0530569 tid->state &= ~AGGR_CLEANUP;
Sujithd43f30152009-01-16 21:38:53 +0530570 }
Sujithe8324352009-01-16 21:38:42 +0530571 }
572
Sujith1286ec62009-01-27 13:30:37 +0530573 rcu_read_unlock();
574
Vivek Natarajanbdd62c02011-01-27 14:45:10 +0530575 if (needreset) {
576 spin_unlock_bh(&sc->sc_pcu_lock);
Sujithe8324352009-01-16 21:38:42 +0530577 ath_reset(sc, false);
Vivek Natarajanbdd62c02011-01-27 14:45:10 +0530578 spin_lock_bh(&sc->sc_pcu_lock);
579 }
Sujithe8324352009-01-16 21:38:42 +0530580}
581
/*
 * Compute the maximum aggregate size (bytes) for a frame's rate series.
 * Returns 0 to disable aggregation when a legacy (non-MCS) rate or a
 * rate-control probe is involved.
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			/* Pick the table row for bandwidth + guard interval. */
			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	/* With BT coex active, leave airtime headroom (3/8 of the budget). */
	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates upto 16 bit lengths (65535).
	 * The IE, however can hold upto 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
649
650/*
Sujithd43f30152009-01-16 21:38:53 +0530651 * Returns the number of delimiters to be added to
Sujithe8324352009-01-16 21:38:42 +0530652 * meet the minimum required mpdudensity.
Sujithe8324352009-01-16 21:38:42 +0530653 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	The hardware can keep up at lower rates, but not higher rates
	 */
	if (fi->keyix != ATH9K_TXKEYIX_INVALID)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	/* Symbols that fit in the station's declared mpdu density. */
	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	/* Round sub-symbol densities up to at least one symbol. */
	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Pad short subframes up to the minimum with extra delimiters. */
	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
714
/*
 * Pull frames off the head of tid->buf_q and chain them into one A-MPDU.
 *
 * Frames are moved onto @bf_q and linked via bf_next / the hardware
 * descriptor link field. The loop stops when the block-ack window would
 * be stepped over, the byte or subframe limit is reached, a rate-probe
 * or non-MCS frame is hit, or the software queue empties.
 *
 * @aggr_len: on return, total aggregate length in bytes (delimiters and
 *            padding included).
 * Returns an ATH_AGGR_STATUS describing why formation stopped.
 * Caller must hold the txq lock (tid->buf_q is manipulated throughout).
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
/* Pad each subframe out to a 4-byte boundary */
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	/* h_baw: half the BAW size, the usual cap on outstanding subframes */
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		fi = get_frame_info(bf->bf_mpdu);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		/* look up the rate-based byte limit once, on the first frame */
		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * Rate-control probes and legacy (non-MCS) frames must go
		 * out alone; stop here if this is not the first frame.
		 */
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
		/* padding owed by the NEXT subframe: 4-byte align + delimiters */
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, fi->seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
800
/*
 * Repeatedly form aggregates from @tid's software queue and hand them to
 * the hardware queue, until the queue drains, the BAW closes, or the
 * hardware A-MPDU depth reaches ATH_AGGR_MIN_QDEPTH.
 * Caller must hold the txq lock.
 */
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* first frame after PS wakeup: clear the destination mask */
		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
858
Felix Fietkau231c3a12010-09-20 19:35:28 +0200859int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
860 u16 tid, u16 *ssn)
Sujithe8324352009-01-16 21:38:42 +0530861{
862 struct ath_atx_tid *txtid;
863 struct ath_node *an;
864
865 an = (struct ath_node *)sta->drv_priv;
Sujithf83da962009-07-23 15:32:37 +0530866 txtid = ATH_AN_2_TID(an, tid);
Felix Fietkau231c3a12010-09-20 19:35:28 +0200867
868 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
869 return -EAGAIN;
870
Sujithf83da962009-07-23 15:32:37 +0530871 txtid->state |= AGGR_ADDBA_PROGRESS;
Lorenzo Bianconi75401842010-08-01 15:47:32 +0200872 txtid->paused = true;
Felix Fietkau49447f22011-01-10 17:05:48 -0700873 *ssn = txtid->seq_start = txtid->seq_next;
Felix Fietkau231c3a12010-09-20 19:35:28 +0200874
Felix Fietkau2ed72222011-01-10 17:05:49 -0700875 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
876 txtid->baw_head = txtid->baw_tail = 0;
877
Felix Fietkau231c3a12010-09-20 19:35:28 +0200878 return 0;
Sujithe8324352009-01-16 21:38:42 +0530879}
880
/*
 * Tear down the TX aggregation session on @tid.
 *
 * If the ADDBA never completed, just clear the in-progress flag. If
 * subframes are still outstanding in the BAW, defer the teardown to the
 * completion path by setting AGGR_CLEANUP; otherwise drop
 * AGGR_ADDBA_COMPLETE immediately. Queued software frames are flushed
 * via ath_tx_flush_tid() at the end.
 */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	/* Teardown already pending; the completion path will finish it. */
	if (txtid->state & AGGR_CLEANUP)
		return;

	/* Session never got established; just cancel the ADDBA attempt. */
	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}
912
Felix Fietkau55195412011-04-17 23:28:09 +0200913bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
914{
915 struct ath_atx_tid *tid;
916 struct ath_atx_ac *ac;
917 struct ath_txq *txq;
918 bool buffered = false;
919 int tidno;
920
921 for (tidno = 0, tid = &an->tid[tidno];
922 tidno < WME_NUM_TID; tidno++, tid++) {
923
924 if (!tid->sched)
925 continue;
926
927 ac = tid->ac;
928 txq = ac->txq;
929
930 spin_lock_bh(&txq->axq_lock);
931
932 if (!list_empty(&tid->buf_q))
933 buffered = true;
934
935 tid->sched = false;
936 list_del(&tid->list);
937
938 if (ac->sched) {
939 ac->sched = false;
940 list_del(&ac->list);
941 }
942
943 spin_unlock_bh(&txq->axq_lock);
944 }
945
946 return buffered;
947}
948
949void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
950{
951 struct ath_atx_tid *tid;
952 struct ath_atx_ac *ac;
953 struct ath_txq *txq;
954 int tidno;
955
956 for (tidno = 0, tid = &an->tid[tidno];
957 tidno < WME_NUM_TID; tidno++, tid++) {
958
959 ac = tid->ac;
960 txq = ac->txq;
961
962 spin_lock_bh(&txq->axq_lock);
963 ac->clear_ps_filter = true;
964
965 if (!list_empty(&tid->buf_q) && !tid->paused) {
966 ath_tx_queue_tid(txq, tid);
967 ath_txq_schedule(sc, txq);
968 }
969
970 spin_unlock_bh(&txq->axq_lock);
971 }
972}
973
Sujithe8324352009-01-16 21:38:42 +0530974void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
975{
976 struct ath_atx_tid *txtid;
977 struct ath_node *an;
978
979 an = (struct ath_node *)sta->drv_priv;
980
981 if (sc->sc_flags & SC_OP_TXAGGR) {
982 txtid = ATH_AN_2_TID(an, tid);
983 txtid->baw_size =
984 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
985 txtid->state |= AGGR_ADDBA_COMPLETE;
986 txtid->state &= ~AGGR_ADDBA_PROGRESS;
987 ath_tx_resume_tid(sc, txtid);
988 }
989}
990
Sujithe8324352009-01-16 21:38:42 +0530991/********************/
992/* Queue Management */
993/********************/
994
Sujithe8324352009-01-16 21:38:42 +0530995static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
996 struct ath_txq *txq)
997{
998 struct ath_atx_ac *ac, *ac_tmp;
999 struct ath_atx_tid *tid, *tid_tmp;
1000
1001 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1002 list_del(&ac->list);
1003 ac->sched = false;
1004 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1005 list_del(&tid->list);
1006 tid->sched = false;
1007 ath_tid_drain(sc, txq, tid);
1008 }
1009 }
1010}
1011
/*
 * Allocate and initialize a hardware TX queue of the given type/subtype.
 *
 * Returns the matching software ath_txq, or NULL if the hardware has no
 * free queue or returned an out-of-range queue number. Software state is
 * initialized only the first time a given hardware queue number is seen
 * (ATH_TXQ_SETUP bit not yet set).
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	/* Map WME access category to the hardware queue subtype. */
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	/* Use hardware defaults for the contention parameters. */
	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	/* Sanity-check the hardware's queue number against our array. */
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	/* First time this hw queue is handed out: init software state. */
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		/* EDMA FIFO bookkeeping (unused on legacy DMA parts). */
		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}
1092
Sujithe8324352009-01-16 21:38:42 +05301093int ath_txq_update(struct ath_softc *sc, int qnum,
1094 struct ath9k_tx_queue_info *qinfo)
1095{
Sujithcbe61d82009-02-09 13:27:12 +05301096 struct ath_hw *ah = sc->sc_ah;
Sujithe8324352009-01-16 21:38:42 +05301097 int error = 0;
1098 struct ath9k_tx_queue_info qi;
1099
1100 if (qnum == sc->beacon.beaconq) {
1101 /*
1102 * XXX: for beacon queue, we just save the parameter.
1103 * It will be picked up by ath_beaconq_config when
1104 * it's necessary.
1105 */
1106 sc->beacon.beacon_qi = *qinfo;
1107 return 0;
1108 }
1109
Luis R. Rodriguez9680e8a2009-09-13 23:28:00 -07001110 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
Sujithe8324352009-01-16 21:38:42 +05301111
1112 ath9k_hw_get_txq_props(ah, qnum, &qi);
1113 qi.tqi_aifs = qinfo->tqi_aifs;
1114 qi.tqi_cwmin = qinfo->tqi_cwmin;
1115 qi.tqi_cwmax = qinfo->tqi_cwmax;
1116 qi.tqi_burstTime = qinfo->tqi_burstTime;
1117 qi.tqi_readyTime = qinfo->tqi_readyTime;
1118
1119 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
Joe Perches38002762010-12-02 19:12:36 -08001120 ath_err(ath9k_hw_common(sc->sc_ah),
1121 "Unable to update hardware queue %u!\n", qnum);
Sujithe8324352009-01-16 21:38:42 +05301122 error = -EIO;
1123 } else {
1124 ath9k_hw_resettxqueue(ah, qnum);
1125 }
1126
1127 return error;
1128}
1129
1130int ath_cabq_update(struct ath_softc *sc)
1131{
1132 struct ath9k_tx_queue_info qi;
Steve Brown9814f6b2011-02-07 17:10:39 -07001133 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
Sujithe8324352009-01-16 21:38:42 +05301134 int qnum = sc->beacon.cabq->axq_qnum;
Sujithe8324352009-01-16 21:38:42 +05301135
1136 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1137 /*
1138 * Ensure the readytime % is within the bounds.
1139 */
Sujith17d79042009-02-09 13:27:03 +05301140 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1141 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1142 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1143 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
Sujithe8324352009-01-16 21:38:42 +05301144
Steve Brown9814f6b2011-02-07 17:10:39 -07001145 qi.tqi_readyTime = (cur_conf->beacon_interval *
Sujithfdbf7332009-02-17 15:36:35 +05301146 sc->config.cabqReadytime) / 100;
Sujithe8324352009-01-16 21:38:42 +05301147 ath_txq_update(sc, qnum, &qi);
1148
1149 return 0;
1150}
1151
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001152static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1153{
1154 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1155 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1156}
1157
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 *
 * Every queued frame is completed (with a zeroed tx status) through
 * either the aggregate or the single-frame completion path. EDMA parts
 * drain the TX FIFO slots plus the pending list; legacy parts drain
 * axq_q. If @retry_tx is false and aggregation is on, frames queued in
 * software per TID are dropped as well.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	/* Zeroed status: the frames were never (successfully) sent. */
	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			/* EDMA: drain the FIFO slot at the tail index. */
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			/* Legacy DMA: drain axq_q. */
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			/* Stale holding descriptor: just return it. */
			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		/* Complete outside the lock - these may re-queue frames. */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* EDMA: also complete frames waiting for a free FIFO slot. */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0, retry_tx);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
1261
Felix Fietkau080e1a22010-12-05 20:17:53 +01001262bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301263{
Sujithcbe61d82009-02-09 13:27:12 +05301264 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001265 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301266 struct ath_txq *txq;
1267 int i, npend = 0;
1268
1269 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001270 return true;
Sujith043a0402009-01-16 21:38:47 +05301271
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001272 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301273
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001274 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301275 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001276 if (!ATH_TXQ_SETUP(sc, i))
1277 continue;
1278
1279 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301280 }
1281
Felix Fietkau080e1a22010-12-05 20:17:53 +01001282 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001283 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301284
1285 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001286 if (!ATH_TXQ_SETUP(sc, i))
1287 continue;
1288
1289 /*
1290 * The caller will resume queues with ieee80211_wake_queues.
1291 * Mark the queue as not stopped to prevent ath_tx_complete
1292 * from waking the queue too early.
1293 */
1294 txq = &sc->tx.txq[i];
1295 txq->stopped = false;
1296 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301297 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001298
1299 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301300}
1301
Sujithe8324352009-01-16 21:38:42 +05301302void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1303{
1304 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1305 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1306}
1307
Ben Greear7755bad2011-01-18 17:30:00 -08001308/* For each axq_acq entry, for each tid, try to schedule packets
1309 * for transmit until ampdu_depth has reached min Q depth.
1310 */
Sujithe8324352009-01-16 21:38:42 +05301311void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1312{
Ben Greear7755bad2011-01-18 17:30:00 -08001313 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1314 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301315
Felix Fietkau21f28e62011-01-15 14:30:14 +01001316 if (list_empty(&txq->axq_acq) ||
1317 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301318 return;
1319
1320 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001321 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301322
Ben Greear7755bad2011-01-18 17:30:00 -08001323 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1324 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1325 list_del(&ac->list);
1326 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301327
Ben Greear7755bad2011-01-18 17:30:00 -08001328 while (!list_empty(&ac->tid_q)) {
1329 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1330 list);
1331 list_del(&tid->list);
1332 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301333
Ben Greear7755bad2011-01-18 17:30:00 -08001334 if (tid->paused)
1335 continue;
Sujithe8324352009-01-16 21:38:42 +05301336
Ben Greear7755bad2011-01-18 17:30:00 -08001337 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301338
Ben Greear7755bad2011-01-18 17:30:00 -08001339 /*
1340 * add tid to round-robin queue if more frames
1341 * are pending for the tid
1342 */
1343 if (!list_empty(&tid->buf_q))
1344 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301345
Ben Greear7755bad2011-01-18 17:30:00 -08001346 if (tid == last_tid ||
1347 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1348 break;
Sujithe8324352009-01-16 21:38:42 +05301349 }
Ben Greear7755bad2011-01-18 17:30:00 -08001350
1351 if (!list_empty(&ac->tid_q)) {
1352 if (!ac->sched) {
1353 ac->sched = true;
1354 list_add_tail(&ac->list, &txq->axq_acq);
1355 }
1356 }
1357
1358 if (ac == last_ac ||
1359 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1360 return;
Sujithe8324352009-01-16 21:38:42 +05301361 }
1362}
1363
Sujithe8324352009-01-16 21:38:42 +05301364/***********/
1365/* TX, DMA */
1366/***********/
1367
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * EDMA parts push the chain into the next TX FIFO slot (or park it on
 * txq_fifo_pending when all slots are busy); legacy parts append to
 * axq_q and either program TXDP (idle queue) or patch the previous
 * chain's link pointer, then kick the queue.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		/* All FIFO slots busy: defer until a slot completes. */
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_dbg(common, ATH_DBG_XMIT,
				"Initializing tx fifo %d which is non-empty\n",
				txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			/* Queue idle: point TXDP at the new chain. */
			TX_STAT_INC(txq->axq_qnum, puttxbuf);
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
				txq->axq_qnum, ito64(bf->bf_daddr),
				bf->bf_desc);
		} else {
			/* Append: patch the previous chain's link pointer. */
			*txq->axq_link = bf->bf_daddr;
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		}
		/* Remember where to patch in the next chain. */
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth++;
}
1433
/*
 * Submit one frame of an aggregation-enabled TID: either queue it in
 * software for later aggregation, or - when it can go out immediately -
 * add it to the BAW and send it to hardware as a lone (non-aggregated)
 * frame.
 */
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct ath_buf *bf, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
	struct list_head bf_head;

	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		list_add_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	if (!fi->retries)
		ath_tx_addto_baw(sc, tid, fi->seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}
1475
Felix Fietkau82b873a2010-11-11 03:18:37 +01001476static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1477 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001478 struct list_head *bf_head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001479{
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001480 struct ath_frame_info *fi;
Sujithe8324352009-01-16 21:38:42 +05301481 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001482
Sujithe8324352009-01-16 21:38:42 +05301483 bf = list_first_entry(bf_head, struct ath_buf, list);
1484 bf->bf_state.bf_type &= ~BUF_AMPDU;
1485
1486 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001487 if (tid)
1488 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301489
Sujithd43f30152009-01-16 21:38:53 +05301490 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001491 fi = get_frame_info(bf->bf_mpdu);
1492 ath_buf_set_rate(sc, bf, fi->framelen);
Sujithe8324352009-01-16 21:38:42 +05301493 ath_tx_txqaddbuf(sc, txq, bf_head);
Sujithfec247c2009-07-27 12:08:16 +05301494 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001495}
1496
Sujith528f0c62008-10-29 10:14:26 +05301497static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001498{
Sujith528f0c62008-10-29 10:14:26 +05301499 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001500 enum ath9k_pkt_type htype;
1501 __le16 fc;
1502
Sujith528f0c62008-10-29 10:14:26 +05301503 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001504 fc = hdr->frame_control;
1505
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001506 if (ieee80211_is_beacon(fc))
1507 htype = ATH9K_PKT_TYPE_BEACON;
1508 else if (ieee80211_is_probe_resp(fc))
1509 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1510 else if (ieee80211_is_atim(fc))
1511 htype = ATH9K_PKT_TYPE_ATIM;
1512 else if (ieee80211_is_pspoll(fc))
1513 htype = ATH9K_PKT_TYPE_PSPOLL;
1514 else
1515 htype = ATH9K_PKT_TYPE_NORMAL;
1516
1517 return htype;
1518}
1519
/*
 * Populate the per-frame ath_frame_info stored in the skb's control block.
 *
 * NOTE: fi aliases storage inside the ieee80211_tx_info control area, so
 * everything needed from tx_info->control (sta, hw_key) must be read
 * before the memset(fi, ...) below wipes it.
 *
 * For QoS data frames of an aggregation-capable station in HT mode, the
 * sequence number assigned by mac80211 is overridden with the TID's own
 * counter so it stays consistent with the block-ack window.
 */
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ath_softc *sc = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	struct ath_atx_tid *tid;
	enum ath9k_key_type keytype;
	u16 seqno = 0;
	u8 tidno;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	/* sta may be NULL, e.g. for management frames */
	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	hdr = (struct ieee80211_hdr *)skb->data;
	if (an && ieee80211_is_data_qos(hdr->frame_control) &&
		conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {

		tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Override seqno set by upper layer with the one
		 * in tx aggregation state.
		 */
		tid = ATH_AN_2_TID(an, tidno);
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}

	/* From here on, tx_info->control contents are gone (fi aliases them) */
	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
	fi->seqno = seqno;
}
1567
Felix Fietkau82b873a2010-11-11 03:18:37 +01001568static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301569{
1570 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1571 int flags = 0;
1572
Sujith528f0c62008-10-29 10:14:26 +05301573 flags |= ATH9K_TXDESC_INTREQ;
1574
1575 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1576 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301577
Felix Fietkau82b873a2010-11-11 03:18:37 +01001578 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001579 flags |= ATH9K_TXDESC_LDPC;
1580
Sujith528f0c62008-10-29 10:14:26 +05301581 return flags;
1582}
1583
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001584/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001585 * rix - rate index
1586 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1587 * width - 0 for 20 MHz, 1 for 40 MHz
1588 * half_gi - to use 4us v/s 3.6 us for symbol time
1589 */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001590static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301591 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001592{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001593 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001594 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301595
1596 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001597 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001598 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001599 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001600 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1601
1602 if (!half_gi)
1603 duration = SYMBOL_TIME(nsymbols);
1604 else
1605 duration = SYMBOL_TIME_HALFGI(nsymbols);
1606
Sujithe63835b2008-11-18 09:07:53 +05301607 /* addup duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001608 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301609
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001610 return duration;
1611}
1612
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301613u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1614{
1615 struct ath_hw *ah = sc->sc_ah;
1616 struct ath9k_channel *curchan = ah->curchan;
1617 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1618 (curchan->channelFlags & CHANNEL_5GHZ) &&
1619 (chainmask == 0x7) && (rate < 0x90))
1620 return 0x3;
1621 else
1622 return chainmask;
1623}
1624
Felix Fietkau269c44b2010-11-14 15:20:06 +01001625static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001626{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001627 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001628 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301629 struct sk_buff *skb;
1630 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301631 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001632 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301633 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301634 int i, flags = 0;
1635 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301636 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301637
1638 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301639
Sujitha22be222009-03-30 15:28:36 +05301640 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301641 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301642 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301643 hdr = (struct ieee80211_hdr *)skb->data;
1644 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301645
Sujithc89424d2009-01-30 14:29:28 +05301646 /*
1647 * We check if Short Preamble is needed for the CTS rate by
1648 * checking the BSS's global flag.
1649 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1650 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001651 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1652 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301653 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001654 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001655
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001656 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001657 bool is_40, is_sgi, is_sp;
1658 int phy;
1659
Sujithe63835b2008-11-18 09:07:53 +05301660 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001661 continue;
1662
Sujitha8efee42008-11-18 09:07:30 +05301663 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301664 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001665
Felix Fietkau27032052010-01-17 21:08:50 +01001666 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1667 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
Sujithc89424d2009-01-30 14:29:28 +05301668 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001669 flags |= ATH9K_TXDESC_RTSENA;
1670 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1671 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1672 flags |= ATH9K_TXDESC_CTSENA;
1673 }
1674
Sujithc89424d2009-01-30 14:29:28 +05301675 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1676 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1677 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1678 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001679
Felix Fietkau545750d2009-11-23 22:21:01 +01001680 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1681 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1682 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1683
1684 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1685 /* MCS rates */
1686 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301687 series[i].ChSel = ath_txchainmask_reduction(sc,
1688 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001689 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001690 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001691 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1692 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001693 continue;
1694 }
1695
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301696 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001697 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1698 !(rate->flags & IEEE80211_RATE_ERP_G))
1699 phy = WLAN_RC_PHY_CCK;
1700 else
1701 phy = WLAN_RC_PHY_OFDM;
1702
1703 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1704 series[i].Rate = rate->hw_value;
1705 if (rate->hw_value_short) {
1706 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1707 series[i].Rate |= rate->hw_value_short;
1708 } else {
1709 is_sp = false;
1710 }
1711
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301712 if (bf->bf_state.bfs_paprd)
1713 series[i].ChSel = common->tx_chainmask;
1714 else
1715 series[i].ChSel = ath_txchainmask_reduction(sc,
1716 common->tx_chainmask, series[i].Rate);
1717
Felix Fietkau545750d2009-11-23 22:21:01 +01001718 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001719 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001720 }
1721
Felix Fietkau27032052010-01-17 21:08:50 +01001722 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001723 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001724 flags &= ~ATH9K_TXDESC_RTSENA;
1725
1726 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1727 if (flags & ATH9K_TXDESC_RTSENA)
1728 flags &= ~ATH9K_TXDESC_CTSENA;
1729
Sujithe63835b2008-11-18 09:07:53 +05301730 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301731 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1732 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301733 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301734 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301735
Sujith17d79042009-02-09 13:27:03 +05301736 if (sc->config.ath_aggr_prot && flags)
Sujithc89424d2009-01-30 14:29:28 +05301737 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001738}
1739
/*
 * Allocate an ath_buf for an skb, DMA-map the frame and program its TX
 * descriptor (single-segment: first == last == only descriptor).
 *
 * Returns NULL if no buffer is available or the DMA mapping fails; in
 * both cases the skb is left untouched for the caller to free.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
					   struct ath_txq *txq,
					   struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	bf->bf_flags = setup_tx_flags(skb);
	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		/* unwind: detach skb, clear stale address, recycle buffer */
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		return NULL;
	}

	frm_type = get_hw_packet_type(skb);

	ds = bf->bf_desc;
	/* terminate the descriptor chain (link = 0) */
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
			       fi->keyix, fi->keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);


	return bf;
}
1793
/* FIXME: tx power */
/*
 * Dispatch a prepared ath_buf: route QoS data frames of aggregation
 * sessions through the A-MPDU path, everything else through the normal
 * single-frame path. Runs under the destination txq's lock.
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);
	/* resolve the TID only for QoS data frames with a known station */
	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
		ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, bf, txctl);
	} else {
		INIT_LIST_HEAD(&bf_head);
		list_add_tail(&bf->list, &bf_head);

		bf->bf_state.bfs_ftype = txctl->frame_type;
		bf->bf_state.bfs_paprd = txctl->paprd;

		/* PAPRD calibration frames need extra descriptor setup */
		if (bf->bf_state.bfs_paprd)
			ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
						   bf->bf_state.bfs_paprd);

		/* timestamp used to expire stale PAPRD frames on completion */
		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);

		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
1843
/* Upon failure caller should free skb */
/*
 * Main TX entry point from mac80211.
 *
 * Prepares the frame (sequence number assignment, 4-byte header padding,
 * per-frame info), allocates and programs a TX buffer, applies queue
 * flow control, and hands the frame to the DMA path.
 *
 * Returns 0 on success or -ENOMEM; on failure the skb is NOT freed here.
 */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;	/* on-air length incl. FCS */
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	/* account for the ICV appended by hardware encryption */
	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		/* shift the header forward to open the pad gap */
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data.
	 */

	bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	/* stop the mac80211 queue when this hw queue gets too deep */
	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
1912
Sujithe8324352009-01-16 21:38:42 +05301913/*****************/
1914/* TX Completion */
1915/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001916
/*
 * Finish a transmitted skb: translate driver status into mac80211 flags,
 * strip the header padding added on TX, update power-save state, unwind
 * the queue-depth flow control, and return the skb to mac80211.
 */
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, int ftype, struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len>padpos+padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	/* a TX status was the last thing keeping us awake */
	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	/* undo the pending_frames accounting done in ath_tx_start() */
	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		spin_lock_bh(&txq->axq_lock);
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	ieee80211_tx_status(hw, skb);
}
1972
/*
 * Release one MPDU's buffer chain after transmission: unmap the DMA
 * buffer, complete (or drop) PAPRD calibration frames, hand normal
 * frames back to mac80211, and recycle the ath_buf list.
 */
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		/* drop PAPRD frames whose waiter already timed out;
		 * otherwise wake the calibration waiter */
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq);
		ath_tx_complete(sc, skb, tx_flags,
				bf->bf_state.bfs_ftype, txq);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
2018
/*
 * Convert the hardware TX status into the rate-control feedback stored
 * in the skb's ieee80211_tx_info (ack signal, A-MPDU counts, per-rate
 * retry counts), so mac80211's rate control sees accurate statistics.
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	/* index of the rate the hardware actually used */
	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);

		tx_info->status.ampdu_len = nframes;
		tx_info->status.ampdu_ack_len = nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
				     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* invalidate all rate entries after the one actually used */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
2076
/*
 * Reap completed frames from a legacy (pre-EDMA) transmit queue.
 *
 * Repeatedly takes the buffer at the head of txq->axq_q, asks the
 * hardware for its completion status via ath9k_hw_txprocdesc(), and
 * hands finished transmit units to the aggregate or single-frame
 * completion path.  Stops when the queue is empty or the head
 * descriptor is still in progress (-EINPROGRESS).
 *
 * Locking: txq->axq_lock is taken to inspect/modify the queue lists and
 * counters, and deliberately dropped before calling the completion
 * helpers, then re-taken to schedule more traffic.
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			/* Nothing queued: clear the hw link pointer and give
			 * the aggregation scheduler a chance to refill. */
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* Only the holding descriptor is left;
				 * nothing new has completed yet. */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Skip the holding descriptor; the real
				 * head of the transmit unit follows it. */
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		/* Completion status lives in the last descriptor of the
		 * transmit unit (single frame or aggregate). */
		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		/* Frame counts as OK when no error bit is set */
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);

		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);

		/* The previous holding descriptor is now free */
		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
					     true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		spin_lock_bh(&txq->axq_lock);

		/* Completion may have opened room; try to schedule more */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
2182
/*
 * Delayed work that polls for a stuck PLL on AR9485 hardware.
 *
 * Runs every HZ/5 (200ms).  If the PLL sqsum_dvc reading is at or above
 * 0x40000 for three consecutive samples, the chip is considered hung and
 * a full reset is issued.  The work re-arms itself only for AR9485, so
 * the polling stops on other chips.
 *
 * NOTE(review): 'count' is a function-scope static, so it is shared by
 * every ath_softc instance and survives work cancellation; with more
 * than one AR9485 NIC the samples from different devices would be mixed.
 * A per-softc counter would be safer -- confirm the single-device
 * assumption before relying on this.
 */
static void ath_hw_pll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    hw_pll_work.work);
	static int count;

	if (AR_SREV_9485(sc->sc_ah)) {
		/* 0x40000 is the hang threshold for the sqsum_dvc
		 * reading -- TODO confirm against AR9485 errata */
		if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
			count++;

			if (count == 3) {
				/* Rx is hung for more than 500ms. Reset it */
				ath_reset(sc, true);
				count = 0;
			}
		} else
			count = 0;	/* healthy sample: restart the streak */

		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
	}
}
2204
Sujith305fe472009-07-23 15:32:29 +05302205static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002206{
2207 struct ath_softc *sc = container_of(work, struct ath_softc,
2208 tx_complete_work.work);
2209 struct ath_txq *txq;
2210 int i;
2211 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002212#ifdef CONFIG_ATH9K_DEBUGFS
2213 sc->tx_complete_poll_work_seen++;
2214#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002215
2216 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2217 if (ATH_TXQ_SETUP(sc, i)) {
2218 txq = &sc->tx.txq[i];
2219 spin_lock_bh(&txq->axq_lock);
2220 if (txq->axq_depth) {
2221 if (txq->axq_tx_inprogress) {
2222 needreset = true;
2223 spin_unlock_bh(&txq->axq_lock);
2224 break;
2225 } else {
2226 txq->axq_tx_inprogress = true;
2227 }
2228 }
2229 spin_unlock_bh(&txq->axq_lock);
2230 }
2231
2232 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002233 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2234 "tx hung, resetting the chip\n");
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002235 ath_reset(sc, true);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002236 }
2237
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002238 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002239 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2240}
2241
2242
Sujithe8324352009-01-16 21:38:42 +05302243
2244void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002245{
Sujithe8324352009-01-16 21:38:42 +05302246 int i;
2247 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002248
Sujithe8324352009-01-16 21:38:42 +05302249 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002250
2251 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302252 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2253 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002254 }
2255}
2256
/*
 * Tx completion handler for EDMA chips: completions arrive through a
 * shared tx status ring (ath9k_hw_txprocdesc() is called with a NULL
 * descriptor and fills 'txs' from the ring) instead of per-descriptor
 * status words.  Drains the ring until the hardware reports
 * -EINPROGRESS or a status read fails with -EIO.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		/* The status entry names the queue it belongs to */
		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			/* NOTE(review): returns (not continue) when the
			 * status refers to an empty fifo slot, leaving any
			 * remaining ring entries for the next invocation --
			 * presumably an out-of-sync guard; confirm. */
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		/* Detach the whole transmit unit (first..last buffer)
		 * from the tail fifo slot and advance the tail index. */
		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;
		spin_unlock_bh(&txq->axq_lock);

		/* OK when no error bit is set in the completion status */
		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			/* Single frame: feed hw retry status to rate control */
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
					     txok, true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		spin_lock_bh(&txq->axq_lock);

		/* A fifo slot just freed up: push the next pending unit,
		 * or let the aggregation scheduler build a new one. */
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);

		spin_unlock_bh(&txq->axq_lock);
	}
}
2335
Sujithe8324352009-01-16 21:38:42 +05302336/*****************/
2337/* Init, Cleanup */
2338/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002339
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002340static int ath_txstatus_setup(struct ath_softc *sc, int size)
2341{
2342 struct ath_descdma *dd = &sc->txsdma;
2343 u8 txs_len = sc->sc_ah->caps.txs_len;
2344
2345 dd->dd_desc_len = size * txs_len;
2346 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2347 &dd->dd_desc_paddr, GFP_KERNEL);
2348 if (!dd->dd_desc)
2349 return -ENOMEM;
2350
2351 return 0;
2352}
2353
2354static int ath_tx_edma_init(struct ath_softc *sc)
2355{
2356 int err;
2357
2358 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2359 if (!err)
2360 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2361 sc->txsdma.dd_desc_paddr,
2362 ATH_TXSTATUS_RING_SIZE);
2363
2364 return err;
2365}
2366
2367static void ath_tx_edma_cleanup(struct ath_softc *sc)
2368{
2369 struct ath_descdma *dd = &sc->txsdma;
2370
2371 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2372 dd->dd_desc_paddr);
2373}
2374
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002375int ath_tx_init(struct ath_softc *sc, int nbufs)
2376{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002377 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002378 int error = 0;
2379
Sujith797fe5cb2009-03-30 15:28:45 +05302380 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002381
Sujith797fe5cb2009-03-30 15:28:45 +05302382 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002383 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302384 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002385 ath_err(common,
2386 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302387 goto err;
2388 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002389
Sujith797fe5cb2009-03-30 15:28:45 +05302390 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002391 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302392 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002393 ath_err(common,
2394 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302395 goto err;
2396 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002397
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002398 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
Vivek Natarajan181fb182011-01-27 14:45:08 +05302399 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002400
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002401 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2402 error = ath_tx_edma_init(sc);
2403 if (error)
2404 goto err;
2405 }
2406
Sujith797fe5cb2009-03-30 15:28:45 +05302407err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002408 if (error != 0)
2409 ath_tx_cleanup(sc);
2410
2411 return error;
2412}
2413
Sujith797fe5cb2009-03-30 15:28:45 +05302414void ath_tx_cleanup(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002415{
Sujithb77f4832008-12-07 21:44:03 +05302416 if (sc->beacon.bdma.dd_desc_len != 0)
2417 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002418
Sujithb77f4832008-12-07 21:44:03 +05302419 if (sc->tx.txdma.dd_desc_len != 0)
2420 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002421
2422 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2423 ath_tx_edma_cleanup(sc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002424}
2425
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002426void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2427{
Sujithc5170162008-10-29 10:13:59 +05302428 struct ath_atx_tid *tid;
2429 struct ath_atx_ac *ac;
2430 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002431
Sujith8ee5afb2008-12-07 21:43:36 +05302432 for (tidno = 0, tid = &an->tid[tidno];
Sujithc5170162008-10-29 10:13:59 +05302433 tidno < WME_NUM_TID;
2434 tidno++, tid++) {
2435 tid->an = an;
2436 tid->tidno = tidno;
2437 tid->seq_start = tid->seq_next = 0;
2438 tid->baw_size = WME_MAX_BA;
2439 tid->baw_head = tid->baw_tail = 0;
2440 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05302441 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302442 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302443 INIT_LIST_HEAD(&tid->buf_q);
Sujithc5170162008-10-29 10:13:59 +05302444 acno = TID_TO_WME_AC(tidno);
Sujith8ee5afb2008-12-07 21:43:36 +05302445 tid->ac = &an->ac[acno];
Sujitha37c2c72008-10-29 10:15:40 +05302446 tid->state &= ~AGGR_ADDBA_COMPLETE;
2447 tid->state &= ~AGGR_ADDBA_PROGRESS;
Sujithc5170162008-10-29 10:13:59 +05302448 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002449
Sujith8ee5afb2008-12-07 21:43:36 +05302450 for (acno = 0, ac = &an->ac[acno];
Sujithc5170162008-10-29 10:13:59 +05302451 acno < WME_NUM_AC; acno++, ac++) {
2452 ac->sched = false;
Felix Fietkau066dae92010-11-07 14:59:39 +01002453 ac->txq = sc->tx.txq_map[acno];
Sujithc5170162008-10-29 10:13:59 +05302454 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002455 }
2456}
2457
Sujithb5aa9bf2008-10-29 10:13:31 +05302458void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002459{
Felix Fietkau2b409942010-07-07 19:42:08 +02002460 struct ath_atx_ac *ac;
2461 struct ath_atx_tid *tid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002462 struct ath_txq *txq;
Felix Fietkau066dae92010-11-07 14:59:39 +01002463 int tidno;
Sujithe8324352009-01-16 21:38:42 +05302464
Felix Fietkau2b409942010-07-07 19:42:08 +02002465 for (tidno = 0, tid = &an->tid[tidno];
2466 tidno < WME_NUM_TID; tidno++, tid++) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002467
Felix Fietkau2b409942010-07-07 19:42:08 +02002468 ac = tid->ac;
Felix Fietkau066dae92010-11-07 14:59:39 +01002469 txq = ac->txq;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002470
Felix Fietkau2b409942010-07-07 19:42:08 +02002471 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002472
Felix Fietkau2b409942010-07-07 19:42:08 +02002473 if (tid->sched) {
2474 list_del(&tid->list);
2475 tid->sched = false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002476 }
Felix Fietkau2b409942010-07-07 19:42:08 +02002477
2478 if (ac->sched) {
2479 list_del(&ac->list);
2480 tid->ac->sched = false;
2481 }
2482
2483 ath_tid_drain(sc, txq, tid);
2484 tid->state &= ~AGGR_ADDBA_COMPLETE;
2485 tid->state &= ~AGGR_CLEANUP;
2486
2487 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002488 }
2489}