/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
#define L_SIG 4
#define HT_SIG 8
#define HT_STF 4
#define HT_LTF(_ns) (4 * (_ns))
#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


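/*
 * Data bits carried per OFDM symbol for MCS 0-7 at 20 and 40 MHz.
 * Indexed by (rate index % 8) and channel width, and scaled by the
 * stream count (HT_RC_2_STREAMS) when converting between symbol
 * counts and byte lengths.
 */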
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{  26,  54 }, /* 0: BPSK */
	{  52, 108 }, /* 1: QPSK 1/2 */
	{  78, 162 }, /* 2: QPSK 3/4 */
	{ 104, 216 }, /* 3: 16-QAM 1/2 */
	{ 156, 324 }, /* 4: 16-QAM 3/4 */
	{ 208, 432 }, /* 5: 64-QAM 2/3 */
	{ 234, 486 }, /* 6: 64-QAM 3/4 */
	{ 260, 540 }, /* 7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate) ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

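/*
 * Maximum A-MPDU sizes (in bytes) that fit within a 4 ms transmit
 * duration, per MCS index for each HT20/HT40 and short/long GI
 * combination. Values are clamped to 65532; ath_lookup_rate() uses
 * this table to bound the aggregate length for a frame's rate series.
 */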
static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

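/*
 * Per-frame driver state is kept in the rate_driver_data scratch area
 * of the mac80211 tx info; the BUILD_BUG_ON below ensures that
 * struct ath_frame_info still fits in that area.
 */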
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

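/*
 * Mark a sequence number as completed in the block-ack window and
 * slide the window start forward past any contiguous completed slots.
 */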
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

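/*
 * Clone a buffer for software retry: the new ath_buf shares the mpdu,
 * DMA mapping and descriptor contents of the original, so that the
 * original holding descriptor can stay in place while the copy is
 * requeued.
 */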
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

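/*
 * Walk the subframe chain of an aggregate and count the total number
 * of frames and how many of them were not covered by the block-ack
 * bitmap.
 */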
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


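/*
 * Handle tx completion of an aggregate: check each subframe against
 * the block-ack bitmap, complete acked subframes and update the BAW,
 * queue un-acked subframes for software retry and feed the result
 * back to rate control.
 */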
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * The AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				bf->bf_state.bf_type |= BUF_XRETRY;
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it is
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(sc, bf, ts, nframes,
								 nbad, 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					fi->bf = tbf;
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

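/*
 * Return the aggregate length limit (in bytes) for this frame's rate
 * series, derived from the 4 ms table and the peer's advertised
 * maxampdu; a return value of 0 means this frame should not be
 * aggregated at all.
 */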
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, the hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add a delimiter when using RTS/CTS with aggregation
	 * and a non-enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. first rate) to determine
	 * the required minimum length for a subframe. Take into account
	 * whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

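/*
 * Assemble an A-MPDU: pull frames from the tid queue while the
 * block-ack window, the aggregate length limit and the subframe limit
 * allow it, adding each frame to the BAW and linking its descriptor
 * into the aggregate chain.
 */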
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type |= BUF_AMPDU;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
			!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ath_frame_info *fi;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			fi = get_frame_info(bf->bf_mpdu);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf, fi->framelen);
			ath_tx_txqaddbuf(sc, txq, &bf_q, false);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf, aggr_len);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

Felix Fietkau4b3ba662010-12-17 00:57:00 +01001205static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1206{
1207 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1208 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1209}
1210
Felix Fietkaufce041b2011-05-19 12:20:25 +02001211static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1212 struct list_head *list, bool retry_tx)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05301213 __releases(txq->axq_lock)
1214 __acquires(txq->axq_lock)
Sujithe8324352009-01-16 21:38:42 +05301215{
1216 struct ath_buf *bf, *lastbf;
1217 struct list_head bf_head;
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001218 struct ath_tx_status ts;
1219
1220 memset(&ts, 0, sizeof(ts));
Sujithe8324352009-01-16 21:38:42 +05301221 INIT_LIST_HEAD(&bf_head);
1222
Felix Fietkaufce041b2011-05-19 12:20:25 +02001223 while (!list_empty(list)) {
1224 bf = list_first_entry(list, struct ath_buf, list);
Sujithe8324352009-01-16 21:38:42 +05301225
Felix Fietkaufce041b2011-05-19 12:20:25 +02001226 if (bf->bf_stale) {
1227 list_del(&bf->list);
Sujithe8324352009-01-16 21:38:42 +05301228
Felix Fietkaufce041b2011-05-19 12:20:25 +02001229 ath_tx_return_buffer(sc, bf);
1230 continue;
Sujithe8324352009-01-16 21:38:42 +05301231 }
1232
1233 lastbf = bf->bf_lastbf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02001234 list_cut_position(&bf_head, list, &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001235
Sujithe8324352009-01-16 21:38:42 +05301236 txq->axq_depth--;
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001237 if (bf_is_ampdu_not_probing(bf))
1238 txq->axq_ampdu_depth--;
Sujithe8324352009-01-16 21:38:42 +05301239
Felix Fietkaufce041b2011-05-19 12:20:25 +02001240 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301241 if (bf_isampdu(bf))
Felix Fietkauc5992612010-11-14 15:20:09 +01001242 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1243 retry_tx);
Sujithe8324352009-01-16 21:38:42 +05301244 else
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001245 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001246 spin_lock_bh(&txq->axq_lock);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001247 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001248}
1249
1250/*
1251 * Drain a given TX queue (could be Beacon or Data)
1252 *
1253 * This assumes output has been stopped and
1254 * we do not need to block ath_tx_tasklet.
1255 */
1256void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1257{
1258 spin_lock_bh(&txq->axq_lock);
1259 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1260 int idx = txq->txq_tailidx;
1261
1262 while (!list_empty(&txq->txq_fifo[idx])) {
1263 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
1264 retry_tx);
1265
1266 INCR(idx, ATH_TXFIFO_DEPTH);
1267 }
1268 txq->txq_tailidx = idx;
1269 }
1270
1271 txq->axq_link = NULL;
1272 txq->axq_tx_inprogress = false;
1273 ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
Felix Fietkaue609e2e2010-10-27 02:15:05 +02001274
1275 /* flush any pending frames if aggregation is enabled */
Felix Fietkaufce041b2011-05-19 12:20:25 +02001276 if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
1277 ath_txq_drain_pending_buffers(sc, txq);
1278
1279 spin_unlock_bh(&txq->axq_lock);
Sujithe8324352009-01-16 21:38:42 +05301280}
1281
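/*
 * Abort TX DMA on the hardware and drain every queue that has been
 * set up. Returns false if any queue still reported pending frames
 * after the abort.
 */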
Felix Fietkau080e1a22010-12-05 20:17:53 +01001282bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
Sujith043a0402009-01-16 21:38:47 +05301283{
Sujithcbe61d82009-02-09 13:27:12 +05301284 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001285 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Sujith043a0402009-01-16 21:38:47 +05301286 struct ath_txq *txq;
1287 int i, npend = 0;
1288
1289 if (sc->sc_flags & SC_OP_INVALID)
Felix Fietkau080e1a22010-12-05 20:17:53 +01001290 return true;
Sujith043a0402009-01-16 21:38:47 +05301291
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001292 ath9k_hw_abort_tx_dma(ah);
Sujith043a0402009-01-16 21:38:47 +05301293
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001294 /* Check if any queue remains active */
Sujith043a0402009-01-16 21:38:47 +05301295 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau0d51ccc2011-03-11 21:38:18 +01001296 if (!ATH_TXQ_SETUP(sc, i))
1297 continue;
1298
1299 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
Sujith043a0402009-01-16 21:38:47 +05301300 }
1301
Felix Fietkau080e1a22010-12-05 20:17:53 +01001302 if (npend)
John W. Linville393934c2010-12-08 16:23:31 -05001303 ath_err(common, "Failed to stop TX DMA!\n");
Sujith043a0402009-01-16 21:38:47 +05301304
1305 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Felix Fietkau92460412011-01-24 19:23:14 +01001306 if (!ATH_TXQ_SETUP(sc, i))
1307 continue;
1308
1309 /*
1310 * The caller will resume queues with ieee80211_wake_queues.
1311 * Mark the queue as not stopped to prevent ath_tx_complete
1312 * from waking the queue too early.
1313 */
1314 txq = &sc->tx.txq[i];
1315 txq->stopped = false;
1316 ath_draintxq(sc, txq, retry_tx);
Sujith043a0402009-01-16 21:38:47 +05301317 }
Felix Fietkau080e1a22010-12-05 20:17:53 +01001318
1319 return !npend;
Sujith043a0402009-01-16 21:38:47 +05301320}
1321
Sujithe8324352009-01-16 21:38:42 +05301322void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1323{
1324 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1325 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1326}
1327
Ben Greear7755bad2011-01-18 17:30:00 -08001328/* For each axq_acq entry, for each tid, try to schedule packets
1329 * for transmit until ampdu_depth has reached min Q depth.
1330 */
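/*
 * Rough shape of the loop below (a sketch, not the exact control flow):
 *
 *	for each ac on txq->axq_acq:		(one round-robin pass)
 *		for each tid on ac->tid_q:
 *			ath_tx_sched_aggr(sc, txq, tid);
 *			re-queue the tid if it still has frames buffered;
 *		re-queue the ac if it still has tids queued;
 *		stop once axq_ampdu_depth reaches ATH_AGGR_MIN_QDEPTH
 *		or the pass wraps around to where it started.
 */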
Sujithe8324352009-01-16 21:38:42 +05301331void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1332{
Ben Greear7755bad2011-01-18 17:30:00 -08001333 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1334 struct ath_atx_tid *tid, *last_tid;
Sujithe8324352009-01-16 21:38:42 +05301335
Felix Fietkau21f28e62011-01-15 14:30:14 +01001336 if (list_empty(&txq->axq_acq) ||
1337 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
Sujithe8324352009-01-16 21:38:42 +05301338 return;
1339
1340 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
Ben Greear7755bad2011-01-18 17:30:00 -08001341 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
Sujithe8324352009-01-16 21:38:42 +05301342
Ben Greear7755bad2011-01-18 17:30:00 -08001343 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1344 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1345 list_del(&ac->list);
1346 ac->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301347
Ben Greear7755bad2011-01-18 17:30:00 -08001348 while (!list_empty(&ac->tid_q)) {
1349 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1350 list);
1351 list_del(&tid->list);
1352 tid->sched = false;
Sujithe8324352009-01-16 21:38:42 +05301353
Ben Greear7755bad2011-01-18 17:30:00 -08001354 if (tid->paused)
1355 continue;
Sujithe8324352009-01-16 21:38:42 +05301356
Ben Greear7755bad2011-01-18 17:30:00 -08001357 ath_tx_sched_aggr(sc, txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301358
Ben Greear7755bad2011-01-18 17:30:00 -08001359 /*
1360 * add tid to round-robin queue if more frames
1361 * are pending for the tid
1362 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001363 if (!skb_queue_empty(&tid->buf_q))
Ben Greear7755bad2011-01-18 17:30:00 -08001364 ath_tx_queue_tid(txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301365
Ben Greear7755bad2011-01-18 17:30:00 -08001366 if (tid == last_tid ||
1367 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1368 break;
Sujithe8324352009-01-16 21:38:42 +05301369 }
Ben Greear7755bad2011-01-18 17:30:00 -08001370
1371 if (!list_empty(&ac->tid_q)) {
1372 if (!ac->sched) {
1373 ac->sched = true;
1374 list_add_tail(&ac->list, &txq->axq_acq);
1375 }
1376 }
1377
1378 if (ac == last_ac ||
1379 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1380 return;
Sujithe8324352009-01-16 21:38:42 +05301381 }
1382}
1383
Sujithe8324352009-01-16 21:38:42 +05301384/***********/
1385/* TX, DMA */
1386/***********/
1387
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001388/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001389 * Insert a chain of ath_buf (descriptors) on a txq and
 1390 * assume the descriptors are already chained together by the caller.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001391 */
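/*
 * Two queueing models are handled here: on EDMA chips the chain goes
 * into the current txq_fifo slot (when that slot is free) and TXDP is
 * set with ath9k_hw_puttxbuf(); otherwise the chain is appended to
 * axq_q and either linked to the previous descriptor via axq_link or,
 * for an idle non-EDMA queue, pointed to with ath9k_hw_puttxbuf().
 * Non-EDMA queues are finally kicked with ath9k_hw_txstart().
 */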
Sujith102e0572008-10-29 10:15:16 +05301392static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkaufce041b2011-05-19 12:20:25 +02001393 struct list_head *head, bool internal)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001394{
Sujithcbe61d82009-02-09 13:27:12 +05301395 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001396 struct ath_common *common = ath9k_hw_common(ah);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001397 struct ath_buf *bf, *bf_last;
1398 bool puttxbuf = false;
1399 bool edma;
Sujith102e0572008-10-29 10:15:16 +05301400
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001401 /*
1402 * Insert the frame on the outbound list and
1403 * pass it on to the hardware.
1404 */
1405
1406 if (list_empty(head))
1407 return;
1408
Felix Fietkaufce041b2011-05-19 12:20:25 +02001409 edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001410 bf = list_first_entry(head, struct ath_buf, list);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001411 bf_last = list_entry(head->prev, struct ath_buf, list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001412
Joe Perches226afe62010-12-02 19:12:37 -08001413 ath_dbg(common, ATH_DBG_QUEUE,
1414 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001415
Felix Fietkaufce041b2011-05-19 12:20:25 +02001416 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
1417 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001418 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001419 puttxbuf = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001420 } else {
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001421 list_splice_tail_init(head, &txq->axq_q);
1422
Felix Fietkaufce041b2011-05-19 12:20:25 +02001423 if (txq->axq_link) {
1424 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
Joe Perches226afe62010-12-02 19:12:37 -08001425 ath_dbg(common, ATH_DBG_XMIT,
1426 "link[%u] (%p)=%llx (%p)\n",
1427 txq->axq_qnum, txq->axq_link,
1428 ito64(bf->bf_daddr), bf->bf_desc);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001429 } else if (!edma)
1430 puttxbuf = true;
1431
1432 txq->axq_link = bf_last->bf_desc;
1433 }
1434
1435 if (puttxbuf) {
1436 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1437 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1438 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1439 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1440 }
1441
1442 if (!edma) {
Felix Fietkau8d8d3fd2011-01-24 19:11:54 +01001443 TX_STAT_INC(txq->axq_qnum, txstart);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04001444 ath9k_hw_txstart(ah, txq->axq_qnum);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001445 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02001446
1447 if (!internal) {
1448 txq->axq_depth++;
1449 if (bf_is_ampdu_not_probing(bf))
1450 txq->axq_ampdu_depth++;
1451 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001452}
1453
Sujithe8324352009-01-16 21:38:42 +05301454static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001455 struct sk_buff *skb, struct ath_tx_control *txctl)
Sujithe8324352009-01-16 21:38:42 +05301456{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001457 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau04caf862010-11-14 15:20:12 +01001458 struct list_head bf_head;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001459 struct ath_buf *bf;
Sujithe8324352009-01-16 21:38:42 +05301460
1461 /*
1462 * Do not queue to h/w when any of the following conditions is true:
1463 * - there are pending frames in software queue
1464 * - the TID is currently paused for ADDBA/BAR request
1465 * - seqno is not within block-ack window
1466 * - h/w queue depth exceeds low water mark
1467 */
Felix Fietkau56dc6332011-08-28 00:32:22 +02001468 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
Felix Fietkau44f1d262011-08-28 00:32:25 +02001469 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
Felix Fietkau4b3ba662010-12-17 00:57:00 +01001470 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001471 /*
Sujithe8324352009-01-16 21:38:42 +05301472		 * Add this frame to the software queue so it can be
	1473		 * scheduled later for aggregation.
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001474 */
Ben Greearbda8add2011-01-09 23:11:48 -08001475 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001476 __skb_queue_tail(&tid->buf_q, skb);
Felix Fietkau9af73cf2011-08-10 15:23:35 -06001477 if (!txctl->an || !txctl->an->sleeping)
1478 ath_tx_queue_tid(txctl->txq, tid);
Sujithe8324352009-01-16 21:38:42 +05301479 return;
Jouni Malinenf7a276a2008-12-15 16:02:04 +02001480 }
1481
Felix Fietkau44f1d262011-08-28 00:32:25 +02001482 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1483 if (!bf)
1484 return;
1485
1486 bf->bf_state.bf_type |= BUF_AMPDU;
Felix Fietkau04caf862010-11-14 15:20:12 +01001487 INIT_LIST_HEAD(&bf_head);
1488 list_add(&bf->list, &bf_head);
1489
Sujithe8324352009-01-16 21:38:42 +05301490 /* Add sub-frame to BAW */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001491 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
Sujithe8324352009-01-16 21:38:42 +05301492
1493 /* Queue to h/w without aggregation */
Ben Greearbda8add2011-01-09 23:11:48 -08001494 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
Sujithd43f30152009-01-16 21:38:53 +05301495 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001496 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkaufce041b2011-05-19 12:20:25 +02001497 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
Sujithc4288392008-11-18 09:09:30 +05301498}
1499
Felix Fietkau82b873a2010-11-11 03:18:37 +01001500static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
Felix Fietkau44f1d262011-08-28 00:32:25 +02001501 struct ath_atx_tid *tid, struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001502{
Felix Fietkau44f1d262011-08-28 00:32:25 +02001503 struct ath_frame_info *fi = get_frame_info(skb);
1504 struct list_head bf_head;
Sujithe8324352009-01-16 21:38:42 +05301505 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001506
Felix Fietkau44f1d262011-08-28 00:32:25 +02001507 bf = fi->bf;
1508 if (!bf)
1509 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1510
1511 if (!bf)
1512 return;
1513
1514 INIT_LIST_HEAD(&bf_head);
1515 list_add_tail(&bf->list, &bf_head);
Sujithe8324352009-01-16 21:38:42 +05301516 bf->bf_state.bf_type &= ~BUF_AMPDU;
1517
1518 /* update starting sequence number for subsequent ADDBA request */
Felix Fietkau82b873a2010-11-11 03:18:37 +01001519 if (tid)
1520 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
Sujithe8324352009-01-16 21:38:42 +05301521
Sujithd43f30152009-01-16 21:38:53 +05301522 bf->bf_lastbf = bf;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001523 ath_buf_set_rate(sc, bf, fi->framelen);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001524 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
Sujithfec247c2009-07-27 12:08:16 +05301525 TX_STAT_INC(txq->axq_qnum, queued);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001526}
1527
Sujith528f0c62008-10-29 10:14:26 +05301528static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001529{
Sujith528f0c62008-10-29 10:14:26 +05301530 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001531 enum ath9k_pkt_type htype;
1532 __le16 fc;
1533
Sujith528f0c62008-10-29 10:14:26 +05301534 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001535 fc = hdr->frame_control;
1536
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001537 if (ieee80211_is_beacon(fc))
1538 htype = ATH9K_PKT_TYPE_BEACON;
1539 else if (ieee80211_is_probe_resp(fc))
1540 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1541 else if (ieee80211_is_atim(fc))
1542 htype = ATH9K_PKT_TYPE_ATIM;
1543 else if (ieee80211_is_pspoll(fc))
1544 htype = ATH9K_PKT_TYPE_PSPOLL;
1545 else
1546 htype = ATH9K_PKT_TYPE_NORMAL;
1547
1548 return htype;
1549}
1550
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001551static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1552 int framelen)
Sujith528f0c62008-10-29 10:14:26 +05301553{
1554 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001555 struct ieee80211_sta *sta = tx_info->control.sta;
1556 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
Felix Fietkau6a0ddae2011-08-28 00:32:23 +02001557 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001558 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001559 struct ath_node *an = NULL;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001560 enum ath9k_key_type keytype;
Sujith528f0c62008-10-29 10:14:26 +05301561
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001562 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
Sujith528f0c62008-10-29 10:14:26 +05301563
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001564 if (sta)
1565 an = (struct ath_node *) sta->drv_priv;
1566
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001567 memset(fi, 0, sizeof(*fi));
1568 if (hw_key)
1569 fi->keyix = hw_key->hw_key_idx;
Felix Fietkau93ae2dd2011-04-17 23:28:10 +02001570 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1571 fi->keyix = an->ps_key;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001572 else
1573 fi->keyix = ATH9K_TXKEYIX_INVALID;
1574 fi->keytype = keytype;
1575 fi->framelen = framelen;
Sujith528f0c62008-10-29 10:14:26 +05301576}
1577
Felix Fietkau82b873a2010-11-11 03:18:37 +01001578static int setup_tx_flags(struct sk_buff *skb)
Sujith528f0c62008-10-29 10:14:26 +05301579{
1580 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1581 int flags = 0;
1582
Sujith528f0c62008-10-29 10:14:26 +05301583 flags |= ATH9K_TXDESC_INTREQ;
1584
1585 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1586 flags |= ATH9K_TXDESC_NOACK;
Sujith528f0c62008-10-29 10:14:26 +05301587
Felix Fietkau82b873a2010-11-11 03:18:37 +01001588 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
Luis R. Rodriguezb0a33442010-04-15 17:39:39 -04001589 flags |= ATH9K_TXDESC_LDPC;
1590
Sujith528f0c62008-10-29 10:14:26 +05301591 return flags;
1592}
1593
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001594/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001595 * rix - rate index
1596 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1597 * width - 0 for 20 MHz, 1 for 40 MHz
	1598 * half_gi - 1 to use the 3.6 us (short GI) symbol time instead of 4 us
1599 */
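/*
 * Worked example (sketch): MCS0, 20 MHz, long GI, pktlen = 1500:
 *	nbits    = 1500 * 8 + OFDM_PLCP_BITS        = 12022
 *	nsymbits = bits_per_symbol[0][0] * 1 stream = 26
 *	nsymbols = DIV_ROUND_UP(12022, 26)          = 463
 *	duration = SYMBOL_TIME(463) + L_STF + L_LTF + L_SIG + HT_SIG
 *		   + HT_STF + HT_LTF(1)             = 1852 + 36 = 1888 us
 */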
Felix Fietkau269c44b2010-11-14 15:20:06 +01001600static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
Sujith102e0572008-10-29 10:15:16 +05301601 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001602{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001603 u32 nbits, nsymbits, duration, nsymbols;
Felix Fietkau269c44b2010-11-14 15:20:06 +01001604 int streams;
Sujithe63835b2008-11-18 09:07:53 +05301605
1606 /* find number of symbols: PLCP + data */
Felix Fietkauc6663872010-04-19 19:57:33 +02001607 streams = HT_RC_2_STREAMS(rix);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001608 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
Felix Fietkauc6663872010-04-19 19:57:33 +02001609 nsymbits = bits_per_symbol[rix % 8][width] * streams;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001610 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1611
1612 if (!half_gi)
1613 duration = SYMBOL_TIME(nsymbols);
1614 else
1615 duration = SYMBOL_TIME_HALFGI(nsymbols);
1616
Sujithe63835b2008-11-18 09:07:53 +05301617	/* add up duration for legacy/HT training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001618 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +05301619
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001620 return duration;
1621}
1622
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301623u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1624{
1625 struct ath_hw *ah = sc->sc_ah;
1626 struct ath9k_channel *curchan = ah->curchan;
Rajkumar Manoharand77bf3e2011-08-13 10:28:14 +05301627 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1628 (curchan->channelFlags & CHANNEL_5GHZ) &&
1629 (chainmask == 0x7) && (rate < 0x90))
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301630 return 0x3;
1631 else
1632 return chainmask;
1633}
1634
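/*
 * Build the 4-entry rate series for the descriptor from mac80211's
 * tx_info->control.rates[]: MCS entries get their duration from
 * ath_pkt_duration(), legacy entries from ath9k_hw_computetxtime(),
 * and RTS/CTS protection flags are resolved per series entry.
 */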
Felix Fietkau269c44b2010-11-14 15:20:06 +01001635static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001636{
Luis R. Rodriguez43c27612009-09-13 21:07:07 -07001637 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001638 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +05301639 struct sk_buff *skb;
1640 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301641 struct ieee80211_tx_rate *rates;
Felix Fietkau545750d2009-11-23 22:21:01 +01001642 const struct ieee80211_rate *rate;
Sujith254ad0f2009-02-04 08:10:19 +05301643 struct ieee80211_hdr *hdr;
Sujithc89424d2009-01-30 14:29:28 +05301644 int i, flags = 0;
1645 u8 rix = 0, ctsrate = 0;
Sujith254ad0f2009-02-04 08:10:19 +05301646 bool is_pspoll;
Sujithe63835b2008-11-18 09:07:53 +05301647
1648 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +05301649
Sujitha22be222009-03-30 15:28:36 +05301650 skb = bf->bf_mpdu;
Sujith528f0c62008-10-29 10:14:26 +05301651 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +05301652 rates = tx_info->control.rates;
Sujith254ad0f2009-02-04 08:10:19 +05301653 hdr = (struct ieee80211_hdr *)skb->data;
1654 is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
Sujith528f0c62008-10-29 10:14:26 +05301655
Sujithc89424d2009-01-30 14:29:28 +05301656 /*
1657 * We check if Short Preamble is needed for the CTS rate by
1658 * checking the BSS's global flag.
1659 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1660 */
Felix Fietkau545750d2009-11-23 22:21:01 +01001661 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
1662 ctsrate = rate->hw_value;
Sujithc89424d2009-01-30 14:29:28 +05301663 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
Felix Fietkau545750d2009-11-23 22:21:01 +01001664 ctsrate |= rate->hw_value_short;
Luis R. Rodriguez96742252008-12-23 15:58:38 -08001665
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001666 for (i = 0; i < 4; i++) {
Felix Fietkau545750d2009-11-23 22:21:01 +01001667 bool is_40, is_sgi, is_sp;
1668 int phy;
1669
Sujithe63835b2008-11-18 09:07:53 +05301670 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001671 continue;
1672
Sujitha8efee42008-11-18 09:07:30 +05301673 rix = rates[i].idx;
Sujitha8efee42008-11-18 09:07:30 +05301674 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001675
Mohammed Shafi Shajakhancbe8c732011-05-03 13:14:06 +05301676 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Sujithc89424d2009-01-30 14:29:28 +05301677 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Felix Fietkau27032052010-01-17 21:08:50 +01001678 flags |= ATH9K_TXDESC_RTSENA;
1679 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1680 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1681 flags |= ATH9K_TXDESC_CTSENA;
1682 }
1683
Sujithc89424d2009-01-30 14:29:28 +05301684 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1685 series[i].RateFlags |= ATH9K_RATESERIES_2040;
1686 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
1687 series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001688
Felix Fietkau545750d2009-11-23 22:21:01 +01001689 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
1690 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
1691 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1692
1693 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1694 /* MCS rates */
1695 series[i].Rate = rix | 0x80;
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301696 series[i].ChSel = ath_txchainmask_reduction(sc,
1697 common->tx_chainmask, series[i].Rate);
Felix Fietkau269c44b2010-11-14 15:20:06 +01001698 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
Felix Fietkau545750d2009-11-23 22:21:01 +01001699 is_40, is_sgi, is_sp);
Felix Fietkau074a8c02010-04-19 19:57:36 +02001700 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1701 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
Felix Fietkau545750d2009-11-23 22:21:01 +01001702 continue;
1703 }
1704
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301705 /* legacy rates */
Felix Fietkau545750d2009-11-23 22:21:01 +01001706 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1707 !(rate->flags & IEEE80211_RATE_ERP_G))
1708 phy = WLAN_RC_PHY_CCK;
1709 else
1710 phy = WLAN_RC_PHY_OFDM;
1711
1712 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
1713 series[i].Rate = rate->hw_value;
1714 if (rate->hw_value_short) {
1715 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1716 series[i].Rate |= rate->hw_value_short;
1717 } else {
1718 is_sp = false;
1719 }
1720
Mohammed Shafi Shajakhanea066d52010-11-23 20:42:27 +05301721 if (bf->bf_state.bfs_paprd)
1722 series[i].ChSel = common->tx_chainmask;
1723 else
1724 series[i].ChSel = ath_txchainmask_reduction(sc,
1725 common->tx_chainmask, series[i].Rate);
1726
Felix Fietkau545750d2009-11-23 22:21:01 +01001727 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
Felix Fietkau269c44b2010-11-14 15:20:06 +01001728 phy, rate->bitrate * 100, len, rix, is_sp);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001729 }
1730
Felix Fietkau27032052010-01-17 21:08:50 +01001731 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
Felix Fietkau269c44b2010-11-14 15:20:06 +01001732 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
Felix Fietkau27032052010-01-17 21:08:50 +01001733 flags &= ~ATH9K_TXDESC_RTSENA;
1734
1735 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
1736 if (flags & ATH9K_TXDESC_RTSENA)
1737 flags &= ~ATH9K_TXDESC_CTSENA;
1738
Sujithe63835b2008-11-18 09:07:53 +05301739 /* set dur_update_en for l-sig computation except for PS-Poll frames */
Sujithc89424d2009-01-30 14:29:28 +05301740 ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
1741 bf->bf_lastbf->bf_desc,
Sujith254ad0f2009-02-04 08:10:19 +05301742 !is_pspoll, ctsrate,
Sujithc89424d2009-01-30 14:29:28 +05301743 0, series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +05301744
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001745}
1746
Felix Fietkau44f1d262011-08-28 00:32:25 +02001747/*
	1748 * Assign a descriptor (and a sequence number if necessary)
	1749 * and map the buffer for DMA. Frees the skb on error.
1750 */
Felix Fietkaufa05f872011-08-28 00:32:24 +02001751static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
Felix Fietkau04caf862010-11-14 15:20:12 +01001752 struct ath_txq *txq,
Felix Fietkaufa05f872011-08-28 00:32:24 +02001753 struct ath_atx_tid *tid,
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001754 struct sk_buff *skb)
Sujithe8324352009-01-16 21:38:42 +05301755{
Felix Fietkau04caf862010-11-14 15:20:12 +01001756 struct ath_hw *ah = sc->sc_ah;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001757 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001758 struct ath_frame_info *fi = get_frame_info(skb);
Felix Fietkaufa05f872011-08-28 00:32:24 +02001759 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001760 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001761 struct ath_desc *ds;
Felix Fietkau04caf862010-11-14 15:20:12 +01001762 int frm_type;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001763 u16 seqno;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001764
1765 bf = ath_tx_get_buffer(sc);
1766 if (!bf) {
Joe Perches226afe62010-12-02 19:12:37 -08001767 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
Felix Fietkau44f1d262011-08-28 00:32:25 +02001768 goto error;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001769 }
Sujithe8324352009-01-16 21:38:42 +05301770
Sujithe8324352009-01-16 21:38:42 +05301771 ATH_TXBUF_RESET(bf);
1772
Felix Fietkaufa05f872011-08-28 00:32:24 +02001773 if (tid) {
1774 seqno = tid->seq_next;
1775 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1776 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1777 bf->bf_state.seqno = seqno;
1778 }
1779
Felix Fietkau82b873a2010-11-11 03:18:37 +01001780 bf->bf_flags = setup_tx_flags(skb);
Sujithe8324352009-01-16 21:38:42 +05301781 bf->bf_mpdu = skb;
1782
Ben Greearc1739eb32010-10-14 12:45:29 -07001783 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1784 skb->len, DMA_TO_DEVICE);
1785 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
Sujithe8324352009-01-16 21:38:42 +05301786 bf->bf_mpdu = NULL;
Ben Greear6cf9e992010-10-14 12:45:30 -07001787 bf->bf_buf_addr = 0;
Joe Perches38002762010-12-02 19:12:36 -08001788 ath_err(ath9k_hw_common(sc->sc_ah),
1789 "dma_mapping_error() on TX\n");
Felix Fietkau82b873a2010-11-11 03:18:37 +01001790 ath_tx_return_buffer(sc, bf);
Felix Fietkau44f1d262011-08-28 00:32:25 +02001791 goto error;
Sujithe8324352009-01-16 21:38:42 +05301792 }
1793
Sujithe8324352009-01-16 21:38:42 +05301794 frm_type = get_hw_packet_type(skb);
Sujithe8324352009-01-16 21:38:42 +05301795
1796 ds = bf->bf_desc;
Vasanthakumar Thiagarajan87d5efb2010-04-15 17:38:43 -04001797 ath9k_hw_set_desc_link(ah, ds, 0);
Sujithe8324352009-01-16 21:38:42 +05301798
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001799 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1800 fi->keyix, fi->keytype, bf->bf_flags);
Sujithe8324352009-01-16 21:38:42 +05301801
1802 ath9k_hw_filltxdesc(ah, ds,
1803 skb->len, /* segment length */
1804 true, /* first segment */
1805 true, /* last segment */
Vasanthakumar Thiagarajan3f3a1c82010-04-15 17:38:42 -04001806 ds, /* first descriptor */
Vasanthakumar Thiagarajancc610ac02010-04-15 17:39:26 -04001807 bf->bf_buf_addr,
Felix Fietkau04caf862010-11-14 15:20:12 +01001808 txq->axq_qnum);
1809
Felix Fietkau56dc6332011-08-28 00:32:22 +02001810 fi->bf = bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001811
1812 return bf;
Felix Fietkau44f1d262011-08-28 00:32:25 +02001813
1814error:
1815 dev_kfree_skb_any(skb);
1816 return NULL;
Felix Fietkau04caf862010-11-14 15:20:12 +01001817}
1818
1819/* FIXME: tx power */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001820static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
Felix Fietkau04caf862010-11-14 15:20:12 +01001821 struct ath_tx_control *txctl)
1822{
Felix Fietkau04caf862010-11-14 15:20:12 +01001823 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1824 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau248a38d2010-12-10 21:16:46 +01001825 struct ath_atx_tid *tid = NULL;
Felix Fietkaufa05f872011-08-28 00:32:24 +02001826 struct ath_buf *bf;
Felix Fietkau04caf862010-11-14 15:20:12 +01001827 u8 tidno;
Sujithe8324352009-01-16 21:38:42 +05301828
Sujithe8324352009-01-16 21:38:42 +05301829 spin_lock_bh(&txctl->txq->axq_lock);
Mohammed Shafi Shajakhan61e1b0b2011-03-21 18:27:21 +05301830 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1831 ieee80211_is_data_qos(hdr->frame_control)) {
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001832 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1833 IEEE80211_QOS_CTL_TID_MASK;
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001834 tid = ATH_AN_2_TID(txctl->an, tidno);
Felix Fietkau5daefbd2010-11-14 15:20:02 +01001835
Felix Fietkau066dae92010-11-07 14:59:39 +01001836 WARN_ON(tid->ac->txq != txctl->txq);
Felix Fietkau248a38d2010-12-10 21:16:46 +01001837 }
1838
1839 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
Felix Fietkau04caf862010-11-14 15:20:12 +01001840 /*
1841 * Try aggregation if it's a unicast data frame
1842 * and the destination is HT capable.
1843 */
Felix Fietkau44f1d262011-08-28 00:32:25 +02001844 ath_tx_send_ampdu(sc, tid, skb, txctl);
Sujithe8324352009-01-16 21:38:42 +05301845 } else {
Felix Fietkau44f1d262011-08-28 00:32:25 +02001846 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1847 if (!bf)
1848 goto out;
Felix Fietkau04caf862010-11-14 15:20:12 +01001849
Felix Fietkau82b873a2010-11-11 03:18:37 +01001850 bf->bf_state.bfs_paprd = txctl->paprd;
1851
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001852 if (bf->bf_state.bfs_paprd)
Felix Fietkau04caf862010-11-14 15:20:12 +01001853 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1854 bf->bf_state.bfs_paprd);
Felix Fietkau9a6b8272010-11-14 00:03:01 +01001855
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05301856 if (txctl->paprd)
1857 bf->bf_state.bfs_paprd_timestamp = jiffies;
1858
Felix Fietkau55195412011-04-17 23:28:09 +02001859 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1860 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1861
Felix Fietkau44f1d262011-08-28 00:32:25 +02001862 ath_tx_send_normal(sc, txctl->txq, tid, skb);
Sujithe8324352009-01-16 21:38:42 +05301863 }
1864
Felix Fietkaufa05f872011-08-28 00:32:24 +02001865out:
Sujithe8324352009-01-16 21:38:42 +05301866 spin_unlock_bh(&txctl->txq->axq_lock);
1867}
1868
1869/* Upon failure caller should free skb */
Jouni Malinenc52f33d2009-03-03 19:23:29 +02001870int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
Sujithe8324352009-01-16 21:38:42 +05301871 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001872{
Felix Fietkau28d16702010-11-14 15:20:10 +01001873 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1874 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001875 struct ieee80211_sta *sta = info->control.sta;
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001876 struct ieee80211_vif *vif = info->control.vif;
Felix Fietkau9ac586152011-01-24 19:23:18 +01001877 struct ath_softc *sc = hw->priv;
Felix Fietkau84642d62010-06-01 21:33:13 +02001878 struct ath_txq *txq = txctl->txq;
Felix Fietkau28d16702010-11-14 15:20:10 +01001879 int padpos, padsize;
Felix Fietkau04caf862010-11-14 15:20:12 +01001880 int frmlen = skb->len + FCS_LEN;
Felix Fietkau82b873a2010-11-11 03:18:37 +01001881 int q;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001882
Ben Greeara9927ba2010-12-06 21:13:49 -08001883 /* NOTE: sta can be NULL according to net/mac80211.h */
1884 if (sta)
1885 txctl->an = (struct ath_node *)sta->drv_priv;
1886
Felix Fietkau04caf862010-11-14 15:20:12 +01001887 if (info->control.hw_key)
1888 frmlen += info->control.hw_key->icv_len;
1889
Felix Fietkau28d16702010-11-14 15:20:10 +01001890 /*
1891 * As a temporary workaround, assign seq# here; this will likely need
1892 * to be cleaned up to work better with Beacon transmission and virtual
1893 * BSSes.
1894 */
1895 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1896 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1897 sc->tx.seq_no += 0x10;
1898 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1899 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1900 }
1901
1902 /* Add the padding after the header if this is not already done */
1903 padpos = ath9k_cmn_padpos(hdr->frame_control);
1904 padsize = padpos & 3;
1905 if (padsize && skb->len > padpos) {
1906 if (skb_headroom(skb) < padsize)
1907 return -ENOMEM;
1908
1909 skb_push(skb, padsize);
1910 memmove(skb->data, skb->data + padsize, padpos);
1911 }
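	/*
	 * Example (assumed header length): for a 3-address QoS data frame
	 * ath9k_cmn_padpos() would return 26, so padsize = 26 & 3 = 2; the
	 * header is moved two bytes forward and the payload then starts on
	 * a 4-byte boundary (offset 28).
	 */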
1912
Felix Fietkauf59a59f2011-05-10 20:52:22 +02001913 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1914 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1915 !ieee80211_is_data(hdr->frame_control))
1916 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1917
Felix Fietkau2d42efc2010-11-14 15:20:13 +01001918 setup_frame_info(hw, skb, frmlen);
1919
1920 /*
1921 * At this point, the vif, hw_key and sta pointers in the tx control
	1922 * info are no longer valid (overwritten by the ath_frame_info data).
1923 */
1924
Felix Fietkau066dae92010-11-07 14:59:39 +01001925 q = skb_get_queue_mapping(skb);
Felix Fietkau97923b12010-06-12 00:33:55 -04001926 spin_lock_bh(&txq->axq_lock);
Felix Fietkau066dae92010-11-07 14:59:39 +01001927 if (txq == sc->tx.txq_map[q] &&
1928 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
Felix Fietkau7545daf2011-01-24 19:23:16 +01001929 ieee80211_stop_queue(sc->hw, q);
Felix Fietkau97923b12010-06-12 00:33:55 -04001930 txq->stopped = 1;
1931 }
1932 spin_unlock_bh(&txq->axq_lock);
1933
Felix Fietkau44f1d262011-08-28 00:32:25 +02001934 ath_tx_start_dma(sc, skb, txctl);
1935 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001936}
1937
Sujithe8324352009-01-16 21:38:42 +05301938/*****************/
1939/* TX Completion */
1940/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001941
Sujithe8324352009-01-16 21:38:42 +05301942static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05301943 int tx_flags, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001944{
Sujithe8324352009-01-16 21:38:42 +05301945 struct ieee80211_hw *hw = sc->hw;
1946 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07001947 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001948 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
Felix Fietkau97923b12010-06-12 00:33:55 -04001949 int q, padpos, padsize;
Sujithe8324352009-01-16 21:38:42 +05301950
Joe Perches226afe62010-12-02 19:12:37 -08001951 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithe8324352009-01-16 21:38:42 +05301952
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301953 if (tx_flags & ATH_TX_BAR)
Sujithe8324352009-01-16 21:38:42 +05301954 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Sujithe8324352009-01-16 21:38:42 +05301955
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05301956 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
Sujithe8324352009-01-16 21:38:42 +05301957 /* Frame was ACKed */
1958 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1959 }
1960
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001961 padpos = ath9k_cmn_padpos(hdr->frame_control);
1962 padsize = padpos & 3;
1963 if (padsize && skb->len>padpos+padsize) {
Sujithe8324352009-01-16 21:38:42 +05301964 /*
1965 * Remove MAC header padding before giving the frame back to
1966 * mac80211.
1967 */
Benoit Papillault4d91f9f2009-12-12 00:22:35 +01001968 memmove(skb->data + padsize, skb->data, padpos);
Sujithe8324352009-01-16 21:38:42 +05301969 skb_pull(skb, padsize);
1970 }
1971
Sujith1b04b932010-01-08 10:36:05 +05301972 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1973 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
Joe Perches226afe62010-12-02 19:12:37 -08001974 ath_dbg(common, ATH_DBG_PS,
1975 "Going back to sleep after having received TX status (0x%lx)\n",
Sujith1b04b932010-01-08 10:36:05 +05301976 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1977 PS_WAIT_FOR_CAB |
1978 PS_WAIT_FOR_PSPOLL_DATA |
1979 PS_WAIT_FOR_TX_ACK));
Jouni Malinen9a23f9c2009-05-19 17:01:38 +03001980 }
1981
Felix Fietkau7545daf2011-01-24 19:23:16 +01001982 q = skb_get_queue_mapping(skb);
1983 if (txq == sc->tx.txq_map[q]) {
1984 spin_lock_bh(&txq->axq_lock);
1985 if (WARN_ON(--txq->pending_frames < 0))
1986 txq->pending_frames = 0;
Felix Fietkau92460412011-01-24 19:23:14 +01001987
Felix Fietkau7545daf2011-01-24 19:23:16 +01001988 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1989 ieee80211_wake_queue(sc->hw, q);
1990 txq->stopped = 0;
Felix Fietkau066dae92010-11-07 14:59:39 +01001991 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001992 spin_unlock_bh(&txq->axq_lock);
Felix Fietkau97923b12010-06-12 00:33:55 -04001993 }
Felix Fietkau7545daf2011-01-24 19:23:16 +01001994
1995 ieee80211_tx_status(hw, skb);
Sujithe8324352009-01-16 21:38:42 +05301996}
1997
1998static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
Felix Fietkaudb1a0522010-03-29 20:07:11 -07001999 struct ath_txq *txq, struct list_head *bf_q,
2000 struct ath_tx_status *ts, int txok, int sendbar)
Sujithe8324352009-01-16 21:38:42 +05302001{
2002 struct sk_buff *skb = bf->bf_mpdu;
Sujithe8324352009-01-16 21:38:42 +05302003 unsigned long flags;
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302004 int tx_flags = 0;
Sujithe8324352009-01-16 21:38:42 +05302005
Sujithe8324352009-01-16 21:38:42 +05302006 if (sendbar)
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302007 tx_flags = ATH_TX_BAR;
Sujithe8324352009-01-16 21:38:42 +05302008
2009 if (!txok) {
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302010 tx_flags |= ATH_TX_ERROR;
Sujithe8324352009-01-16 21:38:42 +05302011
2012 if (bf_isxretried(bf))
Vasanthakumar Thiagarajan6b2c4032009-03-20 15:27:50 +05302013 tx_flags |= ATH_TX_XRETRY;
Sujithe8324352009-01-16 21:38:42 +05302014 }
2015
Ben Greearc1739eb32010-10-14 12:45:29 -07002016 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
Ben Greear6cf9e992010-10-14 12:45:30 -07002017 bf->bf_buf_addr = 0;
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002018
2019 if (bf->bf_state.bfs_paprd) {
Mohammed Shafi Shajakhan9cf04dc2011-02-04 18:38:23 +05302020 if (time_after(jiffies,
2021 bf->bf_state.bfs_paprd_timestamp +
2022 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002023 dev_kfree_skb_any(skb);
Vasanthakumar Thiagarajan78a18172010-06-24 02:42:46 -07002024 else
Vasanthakumar Thiagarajanca369eb2010-06-24 02:42:44 -07002025 complete(&sc->paprd_complete);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002026 } else {
Felix Fietkau5bec3e52011-01-24 21:29:25 +01002027 ath_debug_stat_tx(sc, bf, ts, txq);
Rajkumar Manoharan0f9dc292011-07-29 17:38:14 +05302028 ath_tx_complete(sc, skb, tx_flags, txq);
Felix Fietkau9f42c2b2010-06-12 00:34:01 -04002029 }
Ben Greear6cf9e992010-10-14 12:45:30 -07002030 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2031 * accidentally reference it later.
2032 */
2033 bf->bf_mpdu = NULL;
Sujithe8324352009-01-16 21:38:42 +05302034
2035 /*
2036 * Return the list of ath_buf of this mpdu to free queue
2037 */
2038 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2039 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2040 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
2041}
2042
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002043static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2044 struct ath_tx_status *ts, int nframes, int nbad,
2045 int txok, bool update_rc)
Sujithc4288392008-11-18 09:09:30 +05302046{
Sujitha22be222009-03-30 15:28:36 +05302047 struct sk_buff *skb = bf->bf_mpdu;
Sujith254ad0f2009-02-04 08:10:19 +05302048 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Sujithc4288392008-11-18 09:09:30 +05302049 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
Felix Fietkau0cdd5c62011-01-24 19:23:17 +01002050 struct ieee80211_hw *hw = sc->hw;
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002051 struct ath_hw *ah = sc->sc_ah;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302052 u8 i, tx_rateindex;
Sujithc4288392008-11-18 09:09:30 +05302053
Sujith95e4acb2009-03-13 08:56:09 +05302054 if (txok)
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002055 tx_info->status.ack_signal = ts->ts_rssi;
Sujith95e4acb2009-03-13 08:56:09 +05302056
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002057 tx_rateindex = ts->ts_rateindex;
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302058 WARN_ON(tx_rateindex >= hw->max_rates);
2059
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002060 if (ts->ts_status & ATH9K_TXERR_FILT)
Sujithc4288392008-11-18 09:09:30 +05302061 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
Björn Smedmanebd02282010-10-10 22:44:39 +02002062 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
Felix Fietkaud9698472010-03-01 13:32:11 +01002063 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
Sujithc4288392008-11-18 09:09:30 +05302064
Felix Fietkaub572d032010-11-14 15:20:07 +01002065 BUG_ON(nbad > nframes);
Björn Smedmanebd02282010-10-10 22:44:39 +02002066
Felix Fietkaub572d032010-11-14 15:20:07 +01002067 tx_info->status.ampdu_len = nframes;
2068 tx_info->status.ampdu_ack_len = nframes - nbad;
Björn Smedmanebd02282010-10-10 22:44:39 +02002069 }
2070
Felix Fietkaudb1a0522010-03-29 20:07:11 -07002071 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302072 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002073 /*
2074 * If an underrun error is seen assume it as an excessive
2075 * retry only if max frame trigger level has been reached
2076 * (2 KB for single stream, and 4 KB for dual stream).
2077 * Adjust the long retry as if the frame was tried
2078 * hw->max_rate_tries times to affect how rate control updates
2079 * PER for the failed rate.
2080 * In case of congestion on the bus penalizing this type of
2081 * underruns should help hardware actually transmit new frames
2082 * successfully by eventually preferring slower rates.
2083 * This itself should also alleviate congestion on the bus.
2084 */
2085 if (ieee80211_is_data(hdr->frame_control) &&
2086 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2087 ATH9K_TX_DELIM_UNDERRUN)) &&
Felix Fietkau83860c52011-03-23 20:57:33 +01002088 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
Felix Fietkauf0c255a2010-11-11 03:18:35 +01002089 tx_info->status.rates[tx_rateindex].count =
2090 hw->max_rate_tries;
Sujithc4288392008-11-18 09:09:30 +05302091 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302092
Felix Fietkau545750d2009-11-23 22:21:01 +01002093 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302094 tx_info->status.rates[i].count = 0;
Felix Fietkau545750d2009-11-23 22:21:01 +01002095 tx_info->status.rates[i].idx = -1;
2096 }
Vasanthakumar Thiagarajan8a92e2e2009-03-20 15:27:49 +05302097
Felix Fietkau78c46532010-06-25 01:26:16 +02002098 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
Sujithc4288392008-11-18 09:09:30 +05302099}
2100
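/*
 * Common completion path for the legacy and EDMA tasklets: the txq
 * lock is dropped around the (potentially long) single-frame or
 * aggregate completion and re-taken before the queue is rescheduled.
 */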
Felix Fietkaufce041b2011-05-19 12:20:25 +02002101static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2102 struct ath_tx_status *ts, struct ath_buf *bf,
2103 struct list_head *bf_head)
Rajkumar Manoharan5479de62011-07-17 11:43:02 +05302104 __releases(txq->axq_lock)
2105 __acquires(txq->axq_lock)
Felix Fietkaufce041b2011-05-19 12:20:25 +02002106{
2107 int txok;
2108
2109 txq->axq_depth--;
2110 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2111 txq->axq_tx_inprogress = false;
2112 if (bf_is_ampdu_not_probing(bf))
2113 txq->axq_ampdu_depth--;
2114
2115 spin_unlock_bh(&txq->axq_lock);
2116
2117 if (!bf_isampdu(bf)) {
2118 /*
2119 * This frame is sent out as a single frame.
2120 * Use hardware retry status for this frame.
2121 */
2122 if (ts->ts_status & ATH9K_TXERR_XRETRY)
2123 bf->bf_state.bf_type |= BUF_XRETRY;
2124 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
2125 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2126 } else
2127 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2128
2129 spin_lock_bh(&txq->axq_lock);
2130
2131 if (sc->sc_flags & SC_OP_TXAGGR)
2132 ath_txq_schedule(sc, txq);
2133}
2134
Sujithc4288392008-11-18 09:09:30 +05302135static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002136{
Sujithcbe61d82009-02-09 13:27:12 +05302137 struct ath_hw *ah = sc->sc_ah;
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002138 struct ath_common *common = ath9k_hw_common(ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002139 struct ath_buf *bf, *lastbf, *bf_held = NULL;
2140 struct list_head bf_head;
Sujithc4288392008-11-18 09:09:30 +05302141 struct ath_desc *ds;
Felix Fietkau29bffa92010-03-29 20:14:23 -07002142 struct ath_tx_status ts;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002143 int status;
2144
Joe Perches226afe62010-12-02 19:12:37 -08002145 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2146 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2147 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002148
Felix Fietkaufce041b2011-05-19 12:20:25 +02002149 spin_lock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002150 for (;;) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002151 if (list_empty(&txq->axq_q)) {
2152 txq->axq_link = NULL;
Felix Fietkau86271e42011-03-11 21:38:19 +01002153 if (sc->sc_flags & SC_OP_TXAGGR)
Ben Greear082f6532011-01-09 23:11:47 -08002154 ath_txq_schedule(sc, txq);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002155 break;
2156 }
2157 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2158
2159 /*
	2160		 * There is a race condition where a BH gets scheduled
	2161		 * after sw writes TxE and before hw re-loads the last
	2162		 * descriptor to pick up the newly chained one.
2163 * Software must keep the last DONE descriptor as a
2164 * holding descriptor - software does so by marking
2165 * it with the STALE flag.
2166 */
2167 bf_held = NULL;
Sujitha119cc42009-03-30 15:28:38 +05302168 if (bf->bf_stale) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002169 bf_held = bf;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002170 if (list_is_last(&bf_held->list, &txq->axq_q))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002171 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002172
2173 bf = list_entry(bf_held->list.next, struct ath_buf,
2174 list);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002175 }
2176
2177 lastbf = bf->bf_lastbf;
Sujithe8324352009-01-16 21:38:42 +05302178 ds = lastbf->bf_desc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002179
Felix Fietkau29bffa92010-03-29 20:14:23 -07002180 memset(&ts, 0, sizeof(ts));
2181 status = ath9k_hw_txprocdesc(ah, ds, &ts);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002182 if (status == -EINPROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002183 break;
Felix Fietkaufce041b2011-05-19 12:20:25 +02002184
Ben Greear2dac4fb2011-01-09 23:11:45 -08002185 TX_STAT_INC(txq->axq_qnum, txprocdesc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002186
2187 /*
2188 * Remove ath_buf's of the same transmit unit from txq,
2189 * however leave the last descriptor back as the holding
2190 * descriptor for hw.
2191 */
Sujitha119cc42009-03-30 15:28:38 +05302192 lastbf->bf_stale = true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002193 INIT_LIST_HEAD(&bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002194 if (!list_is_singular(&lastbf->list))
2195 list_cut_position(&bf_head,
2196 &txq->axq_q, lastbf->list.prev);
2197
Felix Fietkaufce041b2011-05-19 12:20:25 +02002198 if (bf_held) {
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002199 list_del(&bf_held->list);
Felix Fietkau0a8cea82010-04-19 19:57:30 +02002200 ath_tx_return_buffer(sc, bf_held);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002201 }
Johannes Berge6a98542008-10-21 12:40:02 +02002202
Felix Fietkaufce041b2011-05-19 12:20:25 +02002203 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002204 }
Felix Fietkaufce041b2011-05-19 12:20:25 +02002205 spin_unlock_bh(&txq->axq_lock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002206}
2207
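/*
 * Watchdog for stuck queues: axq_tx_inprogress is set here whenever a
 * queue has frames outstanding and is cleared by the completion path;
 * if it is still set on the next poll, the chip is assumed to be hung
 * and is reset.
 */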
Sujith305fe472009-07-23 15:32:29 +05302208static void ath_tx_complete_poll_work(struct work_struct *work)
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002209{
2210 struct ath_softc *sc = container_of(work, struct ath_softc,
2211 tx_complete_work.work);
2212 struct ath_txq *txq;
2213 int i;
2214 bool needreset = false;
Ben Greear60f2d1d2011-01-09 23:11:52 -08002215#ifdef CONFIG_ATH9K_DEBUGFS
2216 sc->tx_complete_poll_work_seen++;
2217#endif
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002218
2219 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2220 if (ATH_TXQ_SETUP(sc, i)) {
2221 txq = &sc->tx.txq[i];
2222 spin_lock_bh(&txq->axq_lock);
2223 if (txq->axq_depth) {
2224 if (txq->axq_tx_inprogress) {
2225 needreset = true;
2226 spin_unlock_bh(&txq->axq_lock);
2227 break;
2228 } else {
2229 txq->axq_tx_inprogress = true;
2230 }
2231 }
2232 spin_unlock_bh(&txq->axq_lock);
2233 }
2234
2235 if (needreset) {
Joe Perches226afe62010-12-02 19:12:37 -08002236 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2237 "tx hung, resetting the chip\n");
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302238 spin_lock_bh(&sc->sc_pcu_lock);
Felix Fietkaufac6b6a2010-10-23 17:45:38 +02002239 ath_reset(sc, true);
Rajkumar Manoharanf6b4e4d2011-06-24 17:38:13 +05302240 spin_unlock_bh(&sc->sc_pcu_lock);
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002241 }
2242
Luis R. Rodriguez42935ec2009-07-29 20:08:07 -04002243 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002244 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2245}
2246
2247
Sujithe8324352009-01-16 21:38:42 +05302248
2249void ath_tx_tasklet(struct ath_softc *sc)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002250{
Sujithe8324352009-01-16 21:38:42 +05302251 int i;
2252 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002253
Sujithe8324352009-01-16 21:38:42 +05302254 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002255
2256 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
Sujithe8324352009-01-16 21:38:42 +05302257 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2258 ath_tx_processq(sc, &sc->tx.txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002259 }
2260}
2261
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002262void ath_tx_edma_tasklet(struct ath_softc *sc)
2263{
Felix Fietkaufce041b2011-05-19 12:20:25 +02002264 struct ath_tx_status ts;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002265 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2266 struct ath_hw *ah = sc->sc_ah;
2267 struct ath_txq *txq;
2268 struct ath_buf *bf, *lastbf;
2269 struct list_head bf_head;
2270 int status;
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002271
2272 for (;;) {
Felix Fietkaufce041b2011-05-19 12:20:25 +02002273 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002274 if (status == -EINPROGRESS)
2275 break;
2276 if (status == -EIO) {
Joe Perches226afe62010-12-02 19:12:37 -08002277 ath_dbg(common, ATH_DBG_XMIT,
2278 "Error processing tx status\n");
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002279 break;
2280 }
2281
2282 /* Skip beacon completions */
Felix Fietkaufce041b2011-05-19 12:20:25 +02002283 if (ts.qid == sc->beacon.beaconq)
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002284 continue;
2285
Felix Fietkaufce041b2011-05-19 12:20:25 +02002286 txq = &sc->tx.txq[ts.qid];
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002287
2288 spin_lock_bh(&txq->axq_lock);
Felix Fietkaufce041b2011-05-19 12:20:25 +02002289
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002290 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2291 spin_unlock_bh(&txq->axq_lock);
2292 return;
2293 }
2294
2295 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2296 struct ath_buf, list);
2297 lastbf = bf->bf_lastbf;
2298
2299 INIT_LIST_HEAD(&bf_head);
2300 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2301 &lastbf->list);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002302
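		/*
		 * If the tail FIFO slot has drained, advance it and, when
		 * frames are still waiting on axq_q, move them into the freed
		 * slot as an internal requeue (depth counters stay unchanged).
		 */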
Felix Fietkaufce041b2011-05-19 12:20:25 +02002303 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2304 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002305
Felix Fietkaufce041b2011-05-19 12:20:25 +02002306 if (!list_empty(&txq->axq_q)) {
2307 struct list_head bf_q;
2308
2309 INIT_LIST_HEAD(&bf_q);
2310 txq->axq_link = NULL;
2311 list_splice_tail_init(&txq->axq_q, &bf_q);
2312 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2313 }
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002314 }
2315
Felix Fietkaufce041b2011-05-19 12:20:25 +02002316 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
Vasanthakumar Thiagarajane5003242010-04-15 17:39:36 -04002317 spin_unlock_bh(&txq->axq_lock);
2318 }
2319}
2320
Sujithe8324352009-01-16 21:38:42 +05302321/*****************/
2322/* Init, Cleanup */
2323/*****************/
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002324
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002325static int ath_txstatus_setup(struct ath_softc *sc, int size)
2326{
2327 struct ath_descdma *dd = &sc->txsdma;
2328 u8 txs_len = sc->sc_ah->caps.txs_len;
2329
2330 dd->dd_desc_len = size * txs_len;
2331 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2332 &dd->dd_desc_paddr, GFP_KERNEL);
2333 if (!dd->dd_desc)
2334 return -ENOMEM;
2335
2336 return 0;
2337}
2338
2339static int ath_tx_edma_init(struct ath_softc *sc)
2340{
2341 int err;
2342
2343 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2344 if (!err)
2345 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2346 sc->txsdma.dd_desc_paddr,
2347 ATH_TXSTATUS_RING_SIZE);
2348
2349 return err;
2350}
2351
2352static void ath_tx_edma_cleanup(struct ath_softc *sc)
2353{
2354 struct ath_descdma *dd = &sc->txsdma;
2355
2356 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2357 dd->dd_desc_paddr);
2358}
2359
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002360int ath_tx_init(struct ath_softc *sc, int nbufs)
2361{
Luis R. Rodriguezc46917b2009-09-13 02:42:02 -07002362 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002363 int error = 0;
2364
Sujith797fe5cb2009-03-30 15:28:45 +05302365 spin_lock_init(&sc->tx.txbuflock);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366
Sujith797fe5cb2009-03-30 15:28:45 +05302367 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
Vasanthakumar Thiagarajan4adfcde2010-04-15 17:39:33 -04002368 "tx", nbufs, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302369 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002370 ath_err(common,
2371 "Failed to allocate tx descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302372 goto err;
2373 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002374
Sujith797fe5cb2009-03-30 15:28:45 +05302375 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002376 "beacon", ATH_BCBUF, 1, 1);
Sujith797fe5cb2009-03-30 15:28:45 +05302377 if (error != 0) {
Joe Perches38002762010-12-02 19:12:36 -08002378 ath_err(common,
2379 "Failed to allocate beacon descriptors: %d\n", error);
Sujith797fe5cb2009-03-30 15:28:45 +05302380 goto err;
2381 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002382
Senthil Balasubramanian164ace32009-07-14 20:17:09 -04002383 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2384
Vasanthakumar Thiagarajan5088c2f2010-04-15 17:39:34 -04002385 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2386 error = ath_tx_edma_init(sc);
2387 if (error)
2388 goto err;
2389 }
2390
Sujith797fe5cb2009-03-30 15:28:45 +05302391err:
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002392 if (error != 0)
2393 ath_tx_cleanup(sc);
2394
2395 return error;
2396}
2397
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

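/*
 * Per-station init: reset every TID's sequence/BAW and aggregation state,
 * and attach each access category to its hardware TX queue.
 */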
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

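/*
 * Per-station teardown: under the queue lock, unlink each TID and its
 * access category from the scheduler, drain pending frames and clear the
 * aggregation state.
 */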
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}