blob: fc52f61ef3ed4eb5e2f42a71066e1b434223646a [file] [log] [blame]
/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070017#include "core.h"
18
19#define BITS_PER_BYTE 8
20#define OFDM_PLCP_BITS 22
21#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
22#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
23#define L_STF 8
24#define L_LTF 8
25#define L_SIG 4
26#define HT_SIG 8
27#define HT_STF 4
28#define HT_LTF(_ns) (4 * (_ns))
29#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
30#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
31#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
32#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
33
34#define OFDM_SIFS_TIME 16
35
36static u32 bits_per_symbol[][2] = {
37 /* 20MHz 40MHz */
38 { 26, 54 }, /* 0: BPSK */
39 { 52, 108 }, /* 1: QPSK 1/2 */
40 { 78, 162 }, /* 2: QPSK 3/4 */
41 { 104, 216 }, /* 3: 16-QAM 1/2 */
42 { 156, 324 }, /* 4: 16-QAM 3/4 */
43 { 208, 432 }, /* 5: 64-QAM 2/3 */
44 { 234, 486 }, /* 6: 64-QAM 3/4 */
45 { 260, 540 }, /* 7: 64-QAM 5/6 */
46 { 52, 108 }, /* 8: BPSK */
47 { 104, 216 }, /* 9: QPSK 1/2 */
48 { 156, 324 }, /* 10: QPSK 3/4 */
49 { 208, 432 }, /* 11: 16-QAM 1/2 */
50 { 312, 648 }, /* 12: 16-QAM 3/4 */
51 { 416, 864 }, /* 13: 64-QAM 2/3 */
52 { 468, 972 }, /* 14: 64-QAM 3/4 */
53 { 520, 1080 }, /* 15: 64-QAM 5/6 */
54};
55
/* Bit 7 of the hardware ratecode distinguishes HT from legacy rates. */
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)
57
58/*
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070059 * Insert a chain of ath_buf (descriptors) on a txq and
60 * assume the descriptors are already chained together by caller.
61 * NB: must be called with txq lock held
62 */
63
Sujith102e0572008-10-29 10:15:16 +053064static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
65 struct list_head *head)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070066{
67 struct ath_hal *ah = sc->sc_ah;
68 struct ath_buf *bf;
Sujith102e0572008-10-29 10:15:16 +053069
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070070 /*
71 * Insert the frame on the outbound list and
72 * pass it on to the hardware.
73 */
74
75 if (list_empty(head))
76 return;
77
78 bf = list_first_entry(head, struct ath_buf, list);
79
80 list_splice_tail_init(head, &txq->axq_q);
81 txq->axq_depth++;
82 txq->axq_totalqueued++;
83 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
84
85 DPRINTF(sc, ATH_DBG_QUEUE,
Sujith04bd46382008-11-28 22:18:05 +053086 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070087
88 if (txq->axq_link == NULL) {
89 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
90 DPRINTF(sc, ATH_DBG_XMIT,
Sujith04bd46382008-11-28 22:18:05 +053091 "TXDP[%u] = %llx (%p)\n",
92 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070093 } else {
94 *txq->axq_link = bf->bf_daddr;
Sujith04bd46382008-11-28 22:18:05 +053095 DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
Luis R. Rodriguezf078f202008-08-04 00:16:41 -070096 txq->axq_qnum, txq->axq_link,
97 ito64(bf->bf_daddr), bf->bf_desc);
98 }
99 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
100 ath9k_hw_txstart(ah, txq->axq_qnum);
101}
102
Sujithc4288392008-11-18 09:09:30 +0530103static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
104 struct ath_xmit_status *tx_status)
105{
106 struct ieee80211_hw *hw = sc->hw;
107 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
108 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
109
Sujith04bd46382008-11-28 22:18:05 +0530110 DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
Sujithc4288392008-11-18 09:09:30 +0530111
112 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
113 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
114 kfree(tx_info_priv);
115 tx_info->rate_driver_data[0] = NULL;
116 }
117
118 if (tx_status->flags & ATH_TX_BAR) {
119 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
120 tx_status->flags &= ~ATH_TX_BAR;
121 }
122
123 if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
124 /* Frame was ACKed */
125 tx_info->flags |= IEEE80211_TX_STAT_ACK;
126 }
127
128 tx_info->status.rates[0].count = tx_status->retries + 1;
129
130 ieee80211_tx_status(hw, skb);
131}
132
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700133/* Check if it's okay to send out aggregates */
134
Sujitha37c2c72008-10-29 10:15:40 +0530135static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700136{
137 struct ath_atx_tid *tid;
138 tid = ATH_AN_2_TID(an, tidno);
139
Sujitha37c2c72008-10-29 10:15:40 +0530140 if (tid->state & AGGR_ADDBA_COMPLETE ||
141 tid->state & AGGR_ADDBA_PROGRESS)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700142 return 1;
143 else
144 return 0;
145}
146
Sujithff37e332008-11-24 12:07:55 +0530147static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
148 struct ath_beacon_config *conf)
149{
150 struct ieee80211_hw *hw = sc->hw;
151
152 /* fill in beacon config data */
153
154 conf->beacon_interval = hw->conf.beacon_int;
155 conf->listen_interval = 100;
156 conf->dtim_count = 1;
157 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
158}
159
Sujith528f0c62008-10-29 10:14:26 +0530160/* Calculate Atheros packet type from IEEE80211 packet header */
161
162static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700163{
Sujith528f0c62008-10-29 10:14:26 +0530164 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700165 enum ath9k_pkt_type htype;
166 __le16 fc;
167
Sujith528f0c62008-10-29 10:14:26 +0530168 hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700169 fc = hdr->frame_control;
170
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700171 if (ieee80211_is_beacon(fc))
172 htype = ATH9K_PKT_TYPE_BEACON;
173 else if (ieee80211_is_probe_resp(fc))
174 htype = ATH9K_PKT_TYPE_PROBE_RESP;
175 else if (ieee80211_is_atim(fc))
176 htype = ATH9K_PKT_TYPE_ATIM;
177 else if (ieee80211_is_pspoll(fc))
178 htype = ATH9K_PKT_TYPE_PSPOLL;
179 else
180 htype = ATH9K_PKT_TYPE_NORMAL;
181
182 return htype;
183}
184
Sujitha8efee42008-11-18 09:07:30 +0530185static bool is_pae(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700186{
187 struct ieee80211_hdr *hdr;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700188 __le16 fc;
189
190 hdr = (struct ieee80211_hdr *)skb->data;
191 fc = hdr->frame_control;
Johannes Berge6a98542008-10-21 12:40:02 +0200192
Sujitha8efee42008-11-18 09:07:30 +0530193 if (ieee80211_is_data(fc)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700194 if (ieee80211_is_nullfunc(fc) ||
Sujith528f0c62008-10-29 10:14:26 +0530195 /* Port Access Entity (IEEE 802.1X) */
196 (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
Sujitha8efee42008-11-18 09:07:30 +0530197 return true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700198 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700199 }
200
Sujitha8efee42008-11-18 09:07:30 +0530201 return false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700202}
203
Sujith528f0c62008-10-29 10:14:26 +0530204static int get_hw_crypto_keytype(struct sk_buff *skb)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700205{
Sujith528f0c62008-10-29 10:14:26 +0530206 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
207
208 if (tx_info->control.hw_key) {
209 if (tx_info->control.hw_key->alg == ALG_WEP)
210 return ATH9K_KEY_TYPE_WEP;
211 else if (tx_info->control.hw_key->alg == ALG_TKIP)
212 return ATH9K_KEY_TYPE_TKIP;
213 else if (tx_info->control.hw_key->alg == ALG_CCMP)
214 return ATH9K_KEY_TYPE_AES;
215 }
216
217 return ATH9K_KEY_TYPE_CLEAR;
218}
219
Sujith528f0c62008-10-29 10:14:26 +0530220/* Called only when tx aggregation is enabled and HT is supported */
221
222static void assign_aggr_tid_seqno(struct sk_buff *skb,
223 struct ath_buf *bf)
224{
225 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
226 struct ieee80211_hdr *hdr;
227 struct ath_node *an;
228 struct ath_atx_tid *tid;
229 __le16 fc;
230 u8 *qc;
231
232 if (!tx_info->control.sta)
233 return;
234
235 an = (struct ath_node *)tx_info->control.sta->drv_priv;
236 hdr = (struct ieee80211_hdr *)skb->data;
237 fc = hdr->frame_control;
238
239 /* Get tidno */
240
241 if (ieee80211_is_data_qos(fc)) {
242 qc = ieee80211_get_qos_ctl(hdr);
243 bf->bf_tidno = qc[0] & 0xf;
Sujith98deeea2008-08-11 14:05:46 +0530244 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700245
Sujith528f0c62008-10-29 10:14:26 +0530246 /* Get seqno */
247
Sujitha8efee42008-11-18 09:07:30 +0530248 if (ieee80211_is_data(fc) && !is_pae(skb)) {
Sujith528f0c62008-10-29 10:14:26 +0530249 /* For HT capable stations, we save tidno for later use.
250 * We also override seqno set by upper layer with the one
251 * in tx aggregation state.
252 *
253 * If fragmentation is on, the sequence number is
254 * not overridden, since it has been
255 * incremented by the fragmentation routine.
256 *
257 * FIXME: check if the fragmentation threshold exceeds
258 * IEEE80211 max.
259 */
260 tid = ATH_AN_2_TID(an, bf->bf_tidno);
261 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
262 IEEE80211_SEQ_SEQ_SHIFT);
263 bf->bf_seqno = tid->seq_next;
264 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
265 }
266}
267
268static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
269 struct ath_txq *txq)
270{
271 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
272 int flags = 0;
273
274 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
275 flags |= ATH9K_TXDESC_INTREQ;
276
277 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
278 flags |= ATH9K_TXDESC_NOACK;
279 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
280 flags |= ATH9K_TXDESC_RTSENA;
281
282 return flags;
283}
284
285static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
286{
287 struct ath_buf *bf = NULL;
288
289 spin_lock_bh(&sc->sc_txbuflock);
290
291 if (unlikely(list_empty(&sc->sc_txbuf))) {
292 spin_unlock_bh(&sc->sc_txbuflock);
293 return NULL;
294 }
295
296 bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
297 list_del(&bf->list);
298
299 spin_unlock_bh(&sc->sc_txbuflock);
300
301 return bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700302}
303
304/* To complete a chain of buffers associated a frame */
305
306static void ath_tx_complete_buf(struct ath_softc *sc,
307 struct ath_buf *bf,
308 struct list_head *bf_q,
309 int txok, int sendbar)
310{
311 struct sk_buff *skb = bf->bf_mpdu;
312 struct ath_xmit_status tx_status;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700313
314 /*
315 * Set retry information.
316 * NB: Don't use the information in the descriptor, because the frame
317 * could be software retried.
318 */
319 tx_status.retries = bf->bf_retries;
320 tx_status.flags = 0;
321
322 if (sendbar)
323 tx_status.flags = ATH_TX_BAR;
324
325 if (!txok) {
326 tx_status.flags |= ATH_TX_ERROR;
327
Sujithcd3d39a2008-08-11 14:03:34 +0530328 if (bf_isxretried(bf))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700329 tx_status.flags |= ATH_TX_XRETRY;
330 }
Sujith102e0572008-10-29 10:15:16 +0530331
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700332 /* Unmap this frame */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700333 pci_unmap_single(sc->pdev,
Sujithff9b6622008-08-14 13:27:16 +0530334 bf->bf_dmacontext,
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700335 skb->len,
336 PCI_DMA_TODEVICE);
337 /* complete this frame */
Sujith528f0c62008-10-29 10:14:26 +0530338 ath_tx_complete(sc, skb, &tx_status);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700339
340 /*
341 * Return the list of ath_buf of this mpdu to free queue
342 */
343 spin_lock_bh(&sc->sc_txbuflock);
344 list_splice_tail_init(bf_q, &sc->sc_txbuf);
345 spin_unlock_bh(&sc->sc_txbuflock);
346}
347
348/*
349 * queue up a dest/ac pair for tx scheduling
350 * NB: must be called with txq lock held
351 */
352
353static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
354{
355 struct ath_atx_ac *ac = tid->ac;
356
357 /*
358 * if tid is paused, hold off
359 */
360 if (tid->paused)
361 return;
362
363 /*
364 * add tid to ac atmost once
365 */
366 if (tid->sched)
367 return;
368
369 tid->sched = true;
370 list_add_tail(&tid->list, &ac->tid_q);
371
372 /*
373 * add node ac to txq atmost once
374 */
375 if (ac->sched)
376 return;
377
378 ac->sched = true;
379 list_add_tail(&ac->list, &txq->axq_acq);
380}
381
382/* pause a tid */
383
384static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
385{
386 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
387
388 spin_lock_bh(&txq->axq_lock);
389
390 tid->paused++;
391
392 spin_unlock_bh(&txq->axq_lock);
393}
394
395/* resume a tid and schedule aggregate */
396
397void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
398{
399 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
400
401 ASSERT(tid->paused > 0);
402 spin_lock_bh(&txq->axq_lock);
403
404 tid->paused--;
405
406 if (tid->paused > 0)
407 goto unlock;
408
409 if (list_empty(&tid->buf_q))
410 goto unlock;
411
412 /*
413 * Add this TID to scheduler and try to send out aggregates
414 */
415 ath_tx_queue_tid(txq, tid);
416 ath_txq_schedule(sc, txq);
417unlock:
418 spin_unlock_bh(&txq->axq_lock);
419}
420
421/* Compute the number of bad frames */
422
Sujithb5aa9bf2008-10-29 10:13:31 +0530423static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
424 int txok)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700425{
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700426 struct ath_buf *bf_last = bf->bf_lastbf;
427 struct ath_desc *ds = bf_last->bf_desc;
428 u16 seq_st = 0;
429 u32 ba[WME_BA_BMP_SIZE >> 5];
430 int ba_index;
431 int nbad = 0;
432 int isaggr = 0;
433
Sujithb5aa9bf2008-10-29 10:13:31 +0530434 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700435 return 0;
436
Sujithcd3d39a2008-08-11 14:03:34 +0530437 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700438 if (isaggr) {
439 seq_st = ATH_DS_BA_SEQ(ds);
440 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
441 }
442
443 while (bf) {
444 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
445 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
446 nbad++;
447
448 bf = bf->bf_next;
449 }
450
451 return nbad;
452}
453
454static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
455{
456 struct sk_buff *skb;
457 struct ieee80211_hdr *hdr;
458
Sujithcd3d39a2008-08-11 14:03:34 +0530459 bf->bf_state.bf_type |= BUF_RETRY;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700460 bf->bf_retries++;
461
462 skb = bf->bf_mpdu;
463 hdr = (struct ieee80211_hdr *)skb->data;
464 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
465}
466
467/* Update block ack window */
468
Sujith102e0572008-10-29 10:15:16 +0530469static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
470 int seqno)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700471{
472 int index, cindex;
473
474 index = ATH_BA_INDEX(tid->seq_start, seqno);
475 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
476
477 tid->tx_buf[cindex] = NULL;
478
479 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
480 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
481 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
482 }
483}
484
485/*
486 * ath_pkt_dur - compute packet duration (NB: not NAV)
487 *
488 * rix - rate index
489 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
490 * width - 0 for 20 MHz, 1 for 40 MHz
491 * half_gi - to use 4us v/s 3.6 us for symbol time
492 */
Sujith102e0572008-10-29 10:15:16 +0530493static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
494 int width, int half_gi, bool shortPreamble)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700495{
Sujithe63835b2008-11-18 09:07:53 +0530496 struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700497 u32 nbits, nsymbits, duration, nsymbols;
498 u8 rc;
499 int streams, pktlen;
500
Sujithcd3d39a2008-08-11 14:03:34 +0530501 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
Sujithe63835b2008-11-18 09:07:53 +0530502 rc = rate_table->info[rix].ratecode;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700503
Sujithe63835b2008-11-18 09:07:53 +0530504 /* for legacy rates, use old function to compute packet duration */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700505 if (!IS_HT_RATE(rc))
Sujithe63835b2008-11-18 09:07:53 +0530506 return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
507 rix, shortPreamble);
508
509 /* find number of symbols: PLCP + data */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700510 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
511 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
512 nsymbols = (nbits + nsymbits - 1) / nsymbits;
513
514 if (!half_gi)
515 duration = SYMBOL_TIME(nsymbols);
516 else
517 duration = SYMBOL_TIME_HALFGI(nsymbols);
518
Sujithe63835b2008-11-18 09:07:53 +0530519 /* addup duration for legacy/ht training and signal fields */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700520 streams = HT_RC_2_STREAMS(rc);
521 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
Sujith102e0572008-10-29 10:15:16 +0530522
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700523 return duration;
524}
525
526/* Rate module function to set rate related fields in tx descriptor */
527
528static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
529{
530 struct ath_hal *ah = sc->sc_ah;
Sujithe63835b2008-11-18 09:07:53 +0530531 struct ath_rate_table *rt;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700532 struct ath_desc *ds = bf->bf_desc;
533 struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
534 struct ath9k_11n_rate_series series[4];
Sujith528f0c62008-10-29 10:14:26 +0530535 struct sk_buff *skb;
536 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +0530537 struct ieee80211_tx_rate *rates;
Sujithe63835b2008-11-18 09:07:53 +0530538 struct ieee80211_hdr *hdr;
539 int i, flags, rtsctsena = 0;
540 u32 ctsduration = 0;
541 u8 rix = 0, cix, ctsrate = 0;
542 __le16 fc;
543
544 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
Sujith528f0c62008-10-29 10:14:26 +0530545
546 skb = (struct sk_buff *)bf->bf_mpdu;
Sujithe63835b2008-11-18 09:07:53 +0530547 hdr = (struct ieee80211_hdr *)skb->data;
548 fc = hdr->frame_control;
Sujith528f0c62008-10-29 10:14:26 +0530549 tx_info = IEEE80211_SKB_CB(skb);
Sujithe63835b2008-11-18 09:07:53 +0530550 rates = tx_info->control.rates;
Sujith528f0c62008-10-29 10:14:26 +0530551
Sujithe63835b2008-11-18 09:07:53 +0530552 if (ieee80211_has_morefrags(fc) ||
553 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
554 rates[1].count = rates[2].count = rates[3].count = 0;
555 rates[1].idx = rates[2].idx = rates[3].idx = 0;
556 rates[0].count = ATH_TXMAXTRY;
557 }
558
559 /* get the cix for the lowest valid rix */
560 rt = sc->hw_rate_table[sc->sc_curmode];
Sujitha8efee42008-11-18 09:07:30 +0530561 for (i = 3; i >= 0; i--) {
Sujithe63835b2008-11-18 09:07:53 +0530562 if (rates[i].count && (rates[i].idx >= 0)) {
Sujitha8efee42008-11-18 09:07:30 +0530563 rix = rates[i].idx;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700564 break;
565 }
566 }
Sujithe63835b2008-11-18 09:07:53 +0530567
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700568 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
Sujithe63835b2008-11-18 09:07:53 +0530569 cix = rt->info[rix].ctrl_rate;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700570
571 /*
Sujithe63835b2008-11-18 09:07:53 +0530572 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
573 * just CTS. Note that this is only done for OFDM/HT unicast frames.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700574 */
Sujithe63835b2008-11-18 09:07:53 +0530575 if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK)
Sujith46d14a52008-11-18 09:08:13 +0530576 && (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
Sujithe63835b2008-11-18 09:07:53 +0530577 WLAN_RC_PHY_HT(rt->info[rix].phy))) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700578 if (sc->sc_protmode == PROT_M_RTSCTS)
579 flags = ATH9K_TXDESC_RTSENA;
580 else if (sc->sc_protmode == PROT_M_CTSONLY)
581 flags = ATH9K_TXDESC_CTSENA;
582
Sujithe63835b2008-11-18 09:07:53 +0530583 cix = rt->info[sc->sc_protrix].ctrl_rate;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700584 rtsctsena = 1;
585 }
586
Sujithe63835b2008-11-18 09:07:53 +0530587 /* For 11n, the default behavior is to enable RTS for hw retried frames.
588 * We enable the global flag here and let rate series flags determine
589 * which rates will actually use RTS.
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700590 */
Sujithcd3d39a2008-08-11 14:03:34 +0530591 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
Sujithe63835b2008-11-18 09:07:53 +0530592 /* 802.11g protection not needed, use our default behavior */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700593 if (!rtsctsena)
594 flags = ATH9K_TXDESC_RTSENA;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700595 }
596
Sujithe63835b2008-11-18 09:07:53 +0530597 /* Set protection if aggregate protection on */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700598 if (sc->sc_config.ath_aggr_prot &&
Sujithcd3d39a2008-08-11 14:03:34 +0530599 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700600 flags = ATH9K_TXDESC_RTSENA;
Sujithe63835b2008-11-18 09:07:53 +0530601 cix = rt->info[sc->sc_protrix].ctrl_rate;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700602 rtsctsena = 1;
603 }
604
Sujithe63835b2008-11-18 09:07:53 +0530605 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
606 if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700607 flags &= ~(ATH9K_TXDESC_RTSENA);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700608
609 /*
Sujithe63835b2008-11-18 09:07:53 +0530610 * CTS transmit rate is derived from the transmit rate by looking in the
611 * h/w rate table. We must also factor in whether or not a short
612 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700613 */
Sujithe63835b2008-11-18 09:07:53 +0530614 ctsrate = rt->info[cix].ratecode |
615 (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700616
617 for (i = 0; i < 4; i++) {
Sujithe63835b2008-11-18 09:07:53 +0530618 if (!rates[i].count || (rates[i].idx < 0))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700619 continue;
620
Sujitha8efee42008-11-18 09:07:30 +0530621 rix = rates[i].idx;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700622
Sujithe63835b2008-11-18 09:07:53 +0530623 series[i].Rate = rt->info[rix].ratecode |
624 (bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700625
Sujitha8efee42008-11-18 09:07:30 +0530626 series[i].Tries = rates[i].count;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700627
628 series[i].RateFlags = (
Sujitha8efee42008-11-18 09:07:30 +0530629 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700630 ATH9K_RATESERIES_RTS_CTS : 0) |
Sujitha8efee42008-11-18 09:07:30 +0530631 ((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700632 ATH9K_RATESERIES_2040 : 0) |
Sujitha8efee42008-11-18 09:07:30 +0530633 ((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700634 ATH9K_RATESERIES_HALFGI : 0);
635
Sujith102e0572008-10-29 10:15:16 +0530636 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
Sujitha8efee42008-11-18 09:07:30 +0530637 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
638 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
Sujith102e0572008-10-29 10:15:16 +0530639 bf_isshpreamble(bf));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700640
Sujithff37e332008-11-24 12:07:55 +0530641 series[i].ChSel = sc->sc_tx_chainmask;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700642
643 if (rtsctsena)
644 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700645 }
646
Sujithe63835b2008-11-18 09:07:53 +0530647 /* set dur_update_en for l-sig computation except for PS-Poll frames */
648 ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
649 ctsrate, ctsduration,
Sujithcd3d39a2008-08-11 14:03:34 +0530650 series, 4, flags);
Sujith102e0572008-10-29 10:15:16 +0530651
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700652 if (sc->sc_config.ath_aggr_prot && flags)
653 ath9k_hw_set11n_burstduration(ah, ds, 8192);
654}
655
656/*
657 * Function to send a normal HT (non-AMPDU) frame
658 * NB: must be called with txq lock held
659 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700660static int ath_tx_send_normal(struct ath_softc *sc,
661 struct ath_txq *txq,
662 struct ath_atx_tid *tid,
663 struct list_head *bf_head)
664{
665 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700666
667 BUG_ON(list_empty(bf_head));
668
669 bf = list_first_entry(bf_head, struct ath_buf, list);
Sujithcd3d39a2008-08-11 14:03:34 +0530670 bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700671
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700672 /* update starting sequence number for subsequent ADDBA request */
673 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
674
675 /* Queue to h/w without aggregation */
676 bf->bf_nframes = 1;
677 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
678 ath_buf_set_rate(sc, bf);
679 ath_tx_txqaddbuf(sc, txq, bf_head);
680
681 return 0;
682}
683
684/* flush tid's software queue and send frames as non-ampdu's */
685
686static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
687{
688 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
689 struct ath_buf *bf;
690 struct list_head bf_head;
691 INIT_LIST_HEAD(&bf_head);
692
693 ASSERT(tid->paused > 0);
694 spin_lock_bh(&txq->axq_lock);
695
696 tid->paused--;
697
698 if (tid->paused > 0) {
699 spin_unlock_bh(&txq->axq_lock);
700 return;
701 }
702
703 while (!list_empty(&tid->buf_q)) {
704 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
Sujithcd3d39a2008-08-11 14:03:34 +0530705 ASSERT(!bf_isretried(bf));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700706 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
707 ath_tx_send_normal(sc, txq, tid, &bf_head);
708 }
709
710 spin_unlock_bh(&txq->axq_lock);
711}
712
713/* Completion routine of an aggregate */
714
715static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
716 struct ath_txq *txq,
717 struct ath_buf *bf,
718 struct list_head *bf_q,
719 int txok)
720{
Sujith528f0c62008-10-29 10:14:26 +0530721 struct ath_node *an = NULL;
722 struct sk_buff *skb;
723 struct ieee80211_tx_info *tx_info;
724 struct ath_atx_tid *tid = NULL;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700725 struct ath_buf *bf_last = bf->bf_lastbf;
726 struct ath_desc *ds = bf_last->bf_desc;
727 struct ath_buf *bf_next, *bf_lastq = NULL;
728 struct list_head bf_head, bf_pending;
729 u16 seq_st = 0;
730 u32 ba[WME_BA_BMP_SIZE >> 5];
731 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700732
Sujith528f0c62008-10-29 10:14:26 +0530733 skb = (struct sk_buff *)bf->bf_mpdu;
734 tx_info = IEEE80211_SKB_CB(skb);
735
736 if (tx_info->control.sta) {
737 an = (struct ath_node *)tx_info->control.sta->drv_priv;
738 tid = ATH_AN_2_TID(an, bf->bf_tidno);
739 }
740
Sujithcd3d39a2008-08-11 14:03:34 +0530741 isaggr = bf_isaggr(bf);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700742 if (isaggr) {
743 if (txok) {
744 if (ATH_DS_TX_BA(ds)) {
745 /*
746 * extract starting sequence and
747 * block-ack bitmap
748 */
749 seq_st = ATH_DS_BA_SEQ(ds);
750 memcpy(ba,
751 ATH_DS_BA_BITMAP(ds),
752 WME_BA_BMP_SIZE >> 3);
753 } else {
Luis R. Rodriguez0345f372008-10-03 15:45:25 -0700754 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700755
756 /*
757 * AR5416 can become deaf/mute when BA
758 * issue happens. Chip needs to be reset.
759 * But AP code may have sychronization issues
760 * when perform internal reset in this routine.
761 * Only enable reset in STA mode for now.
762 */
Sujithb4696c8b2008-08-11 14:04:52 +0530763 if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700764 needreset = 1;
765 }
766 } else {
Luis R. Rodriguez0345f372008-10-03 15:45:25 -0700767 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700768 }
769 }
770
771 INIT_LIST_HEAD(&bf_pending);
772 INIT_LIST_HEAD(&bf_head);
773
774 while (bf) {
775 txfail = txpending = 0;
776 bf_next = bf->bf_next;
777
778 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
779 /* transmit completion, subframe is
780 * acked by block ack */
781 } else if (!isaggr && txok) {
782 /* transmit completion */
783 } else {
784
Sujitha37c2c72008-10-29 10:15:40 +0530785 if (!(tid->state & AGGR_CLEANUP) &&
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700786 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
787 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
788 ath_tx_set_retry(sc, bf);
789 txpending = 1;
790 } else {
Sujithcd3d39a2008-08-11 14:03:34 +0530791 bf->bf_state.bf_type |= BUF_XRETRY;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700792 txfail = 1;
793 sendbar = 1;
794 }
795 } else {
796 /*
797 * cleanup in progress, just fail
798 * the un-acked sub-frames
799 */
800 txfail = 1;
801 }
802 }
803 /*
804 * Remove ath_buf's of this sub-frame from aggregate queue.
805 */
806 if (bf_next == NULL) { /* last subframe in the aggregate */
807 ASSERT(bf->bf_lastfrm == bf_last);
808
809 /*
810 * The last descriptor of the last sub frame could be
811 * a holding descriptor for h/w. If that's the case,
812 * bf->bf_lastfrm won't be in the bf_q.
813 * Make sure we handle bf_q properly here.
814 */
815
816 if (!list_empty(bf_q)) {
817 bf_lastq = list_entry(bf_q->prev,
818 struct ath_buf, list);
819 list_cut_position(&bf_head,
820 bf_q, &bf_lastq->list);
821 } else {
822 /*
823 * XXX: if the last subframe only has one
824 * descriptor which is also being used as
825 * a holding descriptor. Then the ath_buf
826 * is not in the bf_q at all.
827 */
828 INIT_LIST_HEAD(&bf_head);
829 }
830 } else {
831 ASSERT(!list_empty(bf_q));
832 list_cut_position(&bf_head,
833 bf_q, &bf->bf_lastfrm->list);
834 }
835
836 if (!txpending) {
837 /*
838 * complete the acked-ones/xretried ones; update
839 * block-ack window
840 */
841 spin_lock_bh(&txq->axq_lock);
842 ath_tx_update_baw(sc, tid, bf->bf_seqno);
843 spin_unlock_bh(&txq->axq_lock);
844
845 /* complete this sub-frame */
846 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
847 } else {
848 /*
849 * retry the un-acked ones
850 */
851 /*
852 * XXX: if the last descriptor is holding descriptor,
853 * in order to requeue the frame to software queue, we
854 * need to allocate a new descriptor and
855 * copy the content of holding descriptor to it.
856 */
857 if (bf->bf_next == NULL &&
858 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
859 struct ath_buf *tbf;
860
861 /* allocate new descriptor */
862 spin_lock_bh(&sc->sc_txbuflock);
863 ASSERT(!list_empty((&sc->sc_txbuf)));
864 tbf = list_first_entry(&sc->sc_txbuf,
865 struct ath_buf, list);
866 list_del(&tbf->list);
867 spin_unlock_bh(&sc->sc_txbuflock);
868
869 ATH_TXBUF_RESET(tbf);
870
871 /* copy descriptor content */
872 tbf->bf_mpdu = bf_last->bf_mpdu;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700873 tbf->bf_buf_addr = bf_last->bf_buf_addr;
874 *(tbf->bf_desc) = *(bf_last->bf_desc);
875
876 /* link it to the frame */
877 if (bf_lastq) {
878 bf_lastq->bf_desc->ds_link =
879 tbf->bf_daddr;
880 bf->bf_lastfrm = tbf;
881 ath9k_hw_cleartxdesc(sc->sc_ah,
882 bf->bf_lastfrm->bf_desc);
883 } else {
884 tbf->bf_state = bf_last->bf_state;
885 tbf->bf_lastfrm = tbf;
886 ath9k_hw_cleartxdesc(sc->sc_ah,
887 tbf->bf_lastfrm->bf_desc);
888
889 /* copy the DMA context */
Sujithff9b6622008-08-14 13:27:16 +0530890 tbf->bf_dmacontext =
891 bf_last->bf_dmacontext;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700892 }
893 list_add_tail(&tbf->list, &bf_head);
894 } else {
895 /*
896 * Clear descriptor status words for
897 * software retry
898 */
899 ath9k_hw_cleartxdesc(sc->sc_ah,
Sujithff9b6622008-08-14 13:27:16 +0530900 bf->bf_lastfrm->bf_desc);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700901 }
902
903 /*
904 * Put this buffer to the temporary pending
905 * queue to retain ordering
906 */
907 list_splice_tail_init(&bf_head, &bf_pending);
908 }
909
910 bf = bf_next;
911 }
912
Sujitha37c2c72008-10-29 10:15:40 +0530913 if (tid->state & AGGR_CLEANUP) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700914 /* check to see if we're done with cleaning the h/w queue */
915 spin_lock_bh(&txq->axq_lock);
916
917 if (tid->baw_head == tid->baw_tail) {
Sujitha37c2c72008-10-29 10:15:40 +0530918 tid->state &= ~AGGR_ADDBA_COMPLETE;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700919 tid->addba_exchangeattempts = 0;
920 spin_unlock_bh(&txq->axq_lock);
921
Sujitha37c2c72008-10-29 10:15:40 +0530922 tid->state &= ~AGGR_CLEANUP;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700923
924 /* send buffered frames as singles */
925 ath_tx_flush_tid(sc, tid);
926 } else
927 spin_unlock_bh(&txq->axq_lock);
928
929 return;
930 }
931
932 /*
933 * prepend un-acked frames to the beginning of the pending frame queue
934 */
935 if (!list_empty(&bf_pending)) {
936 spin_lock_bh(&txq->axq_lock);
937 /* Note: we _prepend_, we _do_not_ at to
938 * the end of the queue ! */
939 list_splice(&bf_pending, &tid->buf_q);
940 ath_tx_queue_tid(txq, tid);
941 spin_unlock_bh(&txq->axq_lock);
942 }
943
944 if (needreset)
Sujithf45144e2008-08-11 14:02:53 +0530945 ath_reset(sc, false);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700946
947 return;
948}
949
Sujithc4288392008-11-18 09:09:30 +0530950static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
951{
952 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
953 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
954 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
955
Vasanthakumar Thiagarajan7ac47012008-11-20 11:51:18 +0530956 tx_info_priv->update_rc = false;
Sujithc4288392008-11-18 09:09:30 +0530957 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
958 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
959
960 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
961 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
962 if (bf_isdata(bf)) {
963 memcpy(&tx_info_priv->tx, &ds->ds_txstat,
964 sizeof(tx_info_priv->tx));
965 tx_info_priv->n_frames = bf->bf_nframes;
966 tx_info_priv->n_bad_frames = nbad;
Vasanthakumar Thiagarajan7ac47012008-11-20 11:51:18 +0530967 tx_info_priv->update_rc = true;
Sujithc4288392008-11-18 09:09:30 +0530968 }
969 }
970}
971
Luis R. Rodriguezf078f202008-08-04 00:16:41 -0700972/* Process completed xmit descriptors from the specified queue */
973
/*
 * Reap completed transmit descriptors from a hardware queue.
 *
 * Walks txq->axq_q and, for every transmit unit whose last descriptor
 * the hardware has finished with, completes the frames (aggregate or
 * single), recycles the previous STALE holding descriptor, wakes the
 * matching mac80211 queue when enough buffers are freed, and reschedules
 * pending aggregates.  Runs in BH context; takes txq->axq_lock itself.
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	int txok, nbad = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			/* queue fully drained: reset the link state */
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* FIXME:
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Lets work with the next buffer now */
				bf = list_entry(bf_held->list.next,
					struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc; /* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			/* hardware has not finished this unit yet */
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		/* return the previous holding descriptor to the free pool */
		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf_held->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}

		/* feed tx status to the rate-control layer */
		ath_tx_rc_status(bf, ds, nbad);

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}

		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
1109
1110static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1111{
1112 struct ath_hal *ah = sc->sc_ah;
1113
1114 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
Sujith04bd46382008-11-28 22:18:05 +05301115 DPRINTF(sc, ATH_DBG_XMIT, "tx queue [%u] %x, link %p\n",
1116 txq->axq_qnum, ath9k_hw_gettxbuf(ah, txq->axq_qnum),
1117 txq->axq_link);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001118}
1119
1120/* Drain only the data queues */
1121
/*
 * Stop DMA on all configured data tx queues and drain their buffers.
 *
 * If any queue still reports pending frames after the stop request,
 * the chip is reset (under sc_resetlock) to force the DMA engines
 * quiescent before the software queues are drained.
 *
 * @retry_tx: passed through to ath_tx_draintxq() to control whether
 *            drained frames are re-queued for retry.
 */
static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, status, npend = 0;

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
					sc->sc_txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah,
			sc->sc_ah->ah_curchan,
			sc->tx_chan_width,
			sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			sc->sc_ht_extprotspacing, true, &status)) {

			DPRINTF(sc, ATH_DBG_FATAL,
				"Unable to reset hardware; hal status %u\n",
				status);
		}
		spin_unlock_bh(&sc->sc_resetlock);
	}

	/* now drain the software side of every configured queue */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
	}
}
1162
1163/* Add a sub-frame to block ack window */
1164
1165static void ath_tx_addto_baw(struct ath_softc *sc,
1166 struct ath_atx_tid *tid,
1167 struct ath_buf *bf)
1168{
1169 int index, cindex;
1170
Sujithcd3d39a2008-08-11 14:03:34 +05301171 if (bf_isretried(bf))
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001172 return;
1173
1174 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
1175 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1176
1177 ASSERT(tid->tx_buf[cindex] == NULL);
1178 tid->tx_buf[cindex] = bf;
1179
1180 if (index >= ((tid->baw_tail - tid->baw_head) &
1181 (ATH_TID_MAX_BUFS - 1))) {
1182 tid->baw_tail = cindex;
1183 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
1184 }
1185}
1186
1187/*
1188 * Function to send an A-MPDU
1189 * NB: must be called with txq lock held
1190 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001191static int ath_tx_send_ampdu(struct ath_softc *sc,
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001192 struct ath_atx_tid *tid,
1193 struct list_head *bf_head,
1194 struct ath_tx_control *txctl)
1195{
1196 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001197
1198 BUG_ON(list_empty(bf_head));
1199
1200 bf = list_first_entry(bf_head, struct ath_buf, list);
Sujithcd3d39a2008-08-11 14:03:34 +05301201 bf->bf_state.bf_type |= BUF_AMPDU;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001202
1203 /*
1204 * Do not queue to h/w when any of the following conditions is true:
1205 * - there are pending frames in software queue
1206 * - the TID is currently paused for ADDBA/BAR request
1207 * - seqno is not within block-ack window
1208 * - h/w queue depth exceeds low water mark
1209 */
1210 if (!list_empty(&tid->buf_q) || tid->paused ||
1211 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
Sujith528f0c62008-10-29 10:14:26 +05301212 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001213 /*
1214 * Add this frame to software queue for scheduling later
1215 * for aggregation.
1216 */
1217 list_splice_tail_init(bf_head, &tid->buf_q);
Sujith528f0c62008-10-29 10:14:26 +05301218 ath_tx_queue_tid(txctl->txq, tid);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001219 return 0;
1220 }
1221
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001222 /* Add sub-frame to BAW */
1223 ath_tx_addto_baw(sc, tid, bf);
1224
1225 /* Queue to h/w without aggregation */
1226 bf->bf_nframes = 1;
1227 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1228 ath_buf_set_rate(sc, bf);
Sujith528f0c62008-10-29 10:14:26 +05301229 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
Sujith102e0572008-10-29 10:15:16 +05301230
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001231 return 0;
1232}
1233
1234/*
1235 * looks up the rate
1236 * returns aggr limit based on lowest of the rates
1237 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001238static u32 ath_lookup_rate(struct ath_softc *sc,
Johannes Bergae5eb022008-10-14 16:58:37 +02001239 struct ath_buf *bf,
1240 struct ath_atx_tid *tid)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001241{
Sujitha8efee42008-11-18 09:07:30 +05301242 struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001243 struct sk_buff *skb;
1244 struct ieee80211_tx_info *tx_info;
Sujitha8efee42008-11-18 09:07:30 +05301245 struct ieee80211_tx_rate *rates;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001246 struct ath_tx_info_priv *tx_info_priv;
1247 u32 max_4ms_framelen, frame_length;
1248 u16 aggr_limit, legacy = 0, maxampdu;
1249 int i;
1250
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001251 skb = (struct sk_buff *)bf->bf_mpdu;
1252 tx_info = IEEE80211_SKB_CB(skb);
Sujitha8efee42008-11-18 09:07:30 +05301253 rates = tx_info->control.rates;
1254 tx_info_priv =
1255 (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001256
1257 /*
1258 * Find the lowest frame length among the rate series that will have a
1259 * 4ms transmit duration.
1260 * TODO - TXOP limit needs to be considered.
1261 */
1262 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
1263
1264 for (i = 0; i < 4; i++) {
Sujitha8efee42008-11-18 09:07:30 +05301265 if (rates[i].count) {
Sujithe63835b2008-11-18 09:07:53 +05301266 if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001267 legacy = 1;
1268 break;
1269 }
1270
Sujitha8efee42008-11-18 09:07:30 +05301271 frame_length =
1272 rate_table->info[rates[i].idx].max_4ms_framelen;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001273 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1274 }
1275 }
1276
1277 /*
1278 * limit aggregate size by the minimum rate if rate selected is
1279 * not a probe rate, if rate selected is a probe rate then
1280 * avoid aggregation of this packet.
1281 */
1282 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
1283 return 0;
1284
1285 aggr_limit = min(max_4ms_framelen,
1286 (u32)ATH_AMPDU_LIMIT_DEFAULT);
1287
1288 /*
1289 * h/w can accept aggregates upto 16 bit lengths (65535).
1290 * The IE, however can hold upto 65536, which shows up here
1291 * as zero. Ignore 65536 since we are constrained by hw.
1292 */
Johannes Bergae5eb022008-10-14 16:58:37 +02001293 maxampdu = tid->an->maxampdu;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001294 if (maxampdu)
1295 aggr_limit = min(aggr_limit, maxampdu);
1296
1297 return aggr_limit;
1298}
1299
1300/*
1301 * returns the number of delimiters to be added to
1302 * meet the minimum required mpdudensity.
1303 * caller should make sure that the rate is HT rate .
1304 */
/*
 * Compute the number of delimiters to insert before a subframe so the
 * aggregate meets the peer's minimum MPDU density.
 * Caller should make sure that the rate is an HT rate.
 *
 * @tid:    TID the aggregate belongs to (supplies the peer's density)
 * @bf:     first buffer of the aggregate (supplies rate/key info)
 * @frmlen: length of the subframe being added
 *
 * Returns the delimiter count (each delimiter is 4 bytes on air).
 */
static int ath_compute_num_delims(struct ath_softc *sc,
				  struct ath_atx_tid *tid,
				  struct ath_buf *bf,
				  u16 frmlen)
{
	struct ath_rate_table *rt = sc->hw_rate_table[sc->sc_curmode];
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols, mpdudensity;
	u16 minlen;
	u8 rc, flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40MHz and half or full GI.
	 */
	mpdudensity = tid->an->mpdudensity;

	/*
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	rc = rt->info[rix].ratecode;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	/* number of OFDM symbols spanned by the required density interval */
	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Is frame shorter than required minimum length? */
	if (frmlen < minlen) {
		/* Get the minimum number of delimiters required. */
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
1371
1372/*
1373 * For aggregation from software buffer queue.
1374 * NB: must be called with txq lock held
1375 */
/*
 * Build one aggregate (A-MPDU) from the TID's software buffer queue.
 *
 * Frames are taken from tid->buf_q in order and linked into bf_q until
 * the block-ack window closes, the rate-derived byte limit is reached,
 * or the subframe count limit is hit.  Delimiters are inserted between
 * subframes to satisfy the peer's MPDU density.  *bf_last is set to the
 * final buffer of the formed aggregate.
 *
 * Returns the reason aggregation stopped (ATH_AGGR_DONE, _BAW_CLOSED,
 * _LIMITED, or - under AGGR_NOSHORT - _SHORTPKT).
 * NB: must be called with txq lock held.
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		/* rate lookup only once, on the first frame */
		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
			(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

		/* pad to a 4-byte boundary plus the delimiter bytes */
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
				tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);
		nframes++;

		/* chain the previous subframe's descriptors to this one */
		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

#ifdef AGGR_NOSHORT
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf_prev;
	return status;
#undef PADBYTES
}
1493
1494/*
1495 * process pending frames possibly doing a-mpdu aggregation
1496 * NB: must be called with txq lock held
1497 */
/*
 * Drain a TID's software queue into hardware, forming aggregates.
 *
 * Repeatedly builds an aggregate via ath_tx_form_aggr() and queues it.
 * A single-frame "aggregate" is downgraded to a regular frame (aggr
 * bits cleared on every descriptor).  Stops when the hardware queue is
 * deep enough or the block-ack window closes.
 * NB: must be called with txq lock held.
 */
static void ath_tx_sched_aggr(struct ath_softc *sc,
	struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;
	struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
	int prev_frames = 0;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
					  &prev_frames);

		/*
		 * no frames picked up to be aggregated; block-ack
		 * window is not open
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf_last = list_entry(bf_q.prev, struct ath_buf, list);
		bf->bf_lastbf = bf_last;

		/*
		 * if only one frame, send as non-aggregate
		 */
		if (bf->bf_nframes == 1) {
			ASSERT(bf->bf_lastfrm == bf_last);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			/*
			 * clear aggr bits for every descriptor
			 * XXX TODO: is there a way to optimize it?
			 */
			list_for_each_entry(tbf, &bf_q, list) {
				ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
			}

			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/*
		 * setup first desc with rate and aggr info
		 */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/*
		 * anchor last frame of aggregate correctly
		 */
		ASSERT(bf_lastaggr);
		ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
		tbf = bf_lastaggr;
		ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

		/* XXX: We don't enter into this loop, consider removing this */
		while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
			tbf = list_entry(tbf->list.next, struct ath_buf, list);
			ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
		}

		txq->axq_aggr_depth++;

		/*
		 * Normal aggregate, queue to hardware
		 */
		ath_tx_txqaddbuf(sc, txq, &bf_q);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
1578
1579/* Called with txq lock held */
1580
/*
 * Drop every frame buffered on a TID's software queue, completing each
 * as failed and updating the block-ack window for software-retried
 * frames.  The txq lock is deliberately released around each
 * ath_tx_complete_buf() call (completion must not run under the lock),
 * then re-taken.  Called with txq lock held.
 */
static void ath_tid_drain(struct ath_softc *sc,
			  struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * do not indicate packets while holding txq spinlock.
		 * unlock is intentional here
		 */
		spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
1622
1623/*
1624 * Drain all pending buffers
1625 * NB: must be called with txq lock held
1626 */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001627static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
Sujithb5aa9bf2008-10-29 10:13:31 +05301628 struct ath_txq *txq)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001629{
1630 struct ath_atx_ac *ac, *ac_tmp;
1631 struct ath_atx_tid *tid, *tid_tmp;
1632
1633 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1634 list_del(&ac->list);
1635 ac->sched = false;
1636 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1637 list_del(&tid->list);
1638 tid->sched = false;
Sujithb5aa9bf2008-10-29 10:13:31 +05301639 ath_tid_drain(sc, txq, tid);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001640 }
1641 }
1642}
1643
Sujith528f0c62008-10-29 10:14:26 +05301644static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
Sujith8f93b8b2008-11-18 09:10:42 +05301645 struct sk_buff *skb,
Sujith528f0c62008-10-29 10:14:26 +05301646 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001647{
Sujith528f0c62008-10-29 10:14:26 +05301648 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1649 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001650 struct ath_tx_info_priv *tx_info_priv;
Sujith528f0c62008-10-29 10:14:26 +05301651 int hdrlen;
1652 __le16 fc;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001653
Sujitha8efee42008-11-18 09:07:30 +05301654 tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_KERNEL);
1655 tx_info->rate_driver_data[0] = tx_info_priv;
Sujith528f0c62008-10-29 10:14:26 +05301656 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1657 fc = hdr->frame_control;
Jouni Malinene022edb2008-08-22 17:31:33 +03001658
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001659 ATH_TXBUF_RESET(bf);
Sujith528f0c62008-10-29 10:14:26 +05301660
1661 /* Frame type */
1662
1663 bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
Sujithcd3d39a2008-08-11 14:03:34 +05301664
1665 ieee80211_is_data(fc) ?
1666 (bf->bf_state.bf_type |= BUF_DATA) :
1667 (bf->bf_state.bf_type &= ~BUF_DATA);
1668 ieee80211_is_back_req(fc) ?
1669 (bf->bf_state.bf_type |= BUF_BAR) :
1670 (bf->bf_state.bf_type &= ~BUF_BAR);
1671 ieee80211_is_pspoll(fc) ?
1672 (bf->bf_state.bf_type |= BUF_PSPOLL) :
1673 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
Sujith672840a2008-08-11 14:05:08 +05301674 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
Sujithcd3d39a2008-08-11 14:03:34 +05301675 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1676 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
Sujitha8efee42008-11-18 09:07:30 +05301677 (sc->hw->conf.ht.enabled && !is_pae(skb) &&
Sujith528f0c62008-10-29 10:14:26 +05301678 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
1679 (bf->bf_state.bf_type |= BUF_HT) :
1680 (bf->bf_state.bf_type &= ~BUF_HT);
Sujithcd3d39a2008-08-11 14:03:34 +05301681
Sujith528f0c62008-10-29 10:14:26 +05301682 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
1683
1684 /* Crypto */
1685
1686 bf->bf_keytype = get_hw_crypto_keytype(skb);
1687
1688 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1689 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1690 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1691 } else {
1692 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1693 }
1694
Sujith528f0c62008-10-29 10:14:26 +05301695 /* Assign seqno, tidno */
1696
1697 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
1698 assign_aggr_tid_seqno(skb, bf);
1699
1700 /* DMA setup */
1701
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001702 bf->bf_mpdu = skb;
Sujith528f0c62008-10-29 10:14:26 +05301703 bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
1704 skb->len, PCI_DMA_TODEVICE);
1705 bf->bf_buf_addr = bf->bf_dmacontext;
1706}
1707
/* FIXME: tx power */
/*
 * Hand a prepared ath_buf to the transmit machinery: fill in the first
 * descriptor, then either route the frame through the per-TID
 * aggregation path (A-MPDU or normal, depending on the ADDBA state) or
 * queue it to hardware directly.  Takes txctl->txq->axq_lock itself.
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hal *ah = sc->sc_ah;
	int frm_type;

	frm_type = get_hw_packet_type(skb);

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	/* setup descriptor */

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/* Formulate first tx descriptor with tx controls */

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len, /* segment length */
			    true, /* first segment */
			    true, /* last segment */
			    ds); /* first descriptor */

	bf->bf_lastfrm = bf;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq,
					   tid, &bf_head);
		}
	} else {
		/* non-HT / no-station path: a single-frame transmit unit */
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;

		ath_buf_set_rate(sc, bf);
		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
1776
Sujith528f0c62008-10-29 10:14:26 +05301777int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
1778 struct ath_tx_control *txctl)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001779{
Sujith528f0c62008-10-29 10:14:26 +05301780 struct ath_buf *bf;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001781
Sujith528f0c62008-10-29 10:14:26 +05301782 /* Check if a tx buffer is available */
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001783
Sujith528f0c62008-10-29 10:14:26 +05301784 bf = ath_tx_get_buffer(sc);
1785 if (!bf) {
Sujith04bd46382008-11-28 22:18:05 +05301786 DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
Sujith528f0c62008-10-29 10:14:26 +05301787 return -1;
1788 }
1789
Sujith8f93b8b2008-11-18 09:10:42 +05301790 ath_tx_setup_buffer(sc, bf, skb, txctl);
1791 ath_tx_start_dma(sc, bf, txctl);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001792
Sujith528f0c62008-10-29 10:14:26 +05301793 return 0;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001794}
1795
1796/* Initialize TX queue and h/w */
1797
1798int ath_tx_init(struct ath_softc *sc, int nbufs)
1799{
1800 int error = 0;
1801
1802 do {
1803 spin_lock_init(&sc->sc_txbuflock);
1804
1805 /* Setup tx descriptors */
1806 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
Sujith556bb8f2008-08-11 14:03:53 +05301807 "tx", nbufs, 1);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001808 if (error != 0) {
1809 DPRINTF(sc, ATH_DBG_FATAL,
Sujith04bd46382008-11-28 22:18:05 +05301810 "Failed to allocate tx descriptors: %d\n",
1811 error);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001812 break;
1813 }
1814
1815 /* XXX allocate beacon state together with vap */
1816 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
1817 "beacon", ATH_BCBUF, 1);
1818 if (error != 0) {
1819 DPRINTF(sc, ATH_DBG_FATAL,
Sujith04bd46382008-11-28 22:18:05 +05301820 "Failed to allocate beacon descriptors: %d\n",
1821 error);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001822 break;
1823 }
1824
1825 } while (0);
1826
1827 if (error != 0)
1828 ath_tx_cleanup(sc);
1829
1830 return error;
1831}
1832
/* Reclaim all tx queue resources */

int ath_tx_cleanup(struct ath_softc *sc)
{
	/* cleanup beacon descriptors (only if they were allocated) */
	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);

	/* cleanup tx descriptors (only if they were allocated) */
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);

	/* always succeeds */
	return 0;
}
1847
/* Setup a h/w transmit queue */
/*
 * Allocates a hardware tx queue of the given type/subtype and, the
 * first time a given qnum is handed out, initializes the matching
 * software ath_txq state.  Returns a pointer into sc->sc_txq, or NULL
 * when the hal has no queue to spare or returns a queue number beyond
 * what the driver tracks.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	/* let the hal pick its defaults for the WMM parameters */
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
			TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"qnum %u out of range, max %u!\n",
			qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
		/* give the unusable queue back to the hal */
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		/* first time this qnum is handed out: init s/w state */
		struct ath_txq *txq = &sc->sc_txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_linkbuf = NULL;
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
}
1914
1915/* Reclaim resources for a setup queue */
1916
1917void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1918{
1919 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1920 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
1921}
1922
1923/*
1924 * Setup a hardware data transmit queue for the specified
1925 * access control. The hal may not support all requested
1926 * queues in which case it will return a reference to a
1927 * previously setup queue. We record the mapping from ac's
1928 * to h/w queues for use by ath_tx_start and also track
1929 * the set of h/w queues being used to optimize work in the
1930 * transmit interrupt handler and related routines.
1931 */
1932
1933int ath_tx_setup(struct ath_softc *sc, int haltype)
1934{
1935 struct ath_txq *txq;
1936
1937 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1938 DPRINTF(sc, ATH_DBG_FATAL,
Sujith04bd46382008-11-28 22:18:05 +05301939 "HAL AC %u out of range, max %zu!\n",
1940 haltype, ARRAY_SIZE(sc->sc_haltype2q));
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001941 return 0;
1942 }
1943 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1944 if (txq != NULL) {
1945 sc->sc_haltype2q[haltype] = txq->axq_qnum;
1946 return 1;
1947 } else
1948 return 0;
1949}
1950
1951int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
1952{
1953 int qnum;
1954
1955 switch (qtype) {
1956 case ATH9K_TX_QUEUE_DATA:
1957 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1958 DPRINTF(sc, ATH_DBG_FATAL,
Sujith04bd46382008-11-28 22:18:05 +05301959 "HAL AC %u out of range, max %zu!\n",
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07001960 haltype, ARRAY_SIZE(sc->sc_haltype2q));
1961 return -1;
1962 }
1963 qnum = sc->sc_haltype2q[haltype];
1964 break;
1965 case ATH9K_TX_QUEUE_BEACON:
1966 qnum = sc->sc_bhalq;
1967 break;
1968 case ATH9K_TX_QUEUE_CAB:
1969 qnum = sc->sc_cabq->axq_qnum;
1970 break;
1971 default:
1972 qnum = -1;
1973 }
1974 return qnum;
1975}
1976
/* Get a transmit queue, if available */
/*
 * Maps the skb's mac80211 queue to the driver's data txq and applies
 * flow control: when the queue is close to exhausting tx descriptors,
 * the corresponding mac80211 queue is stopped and NULL is returned so
 * the caller can back off.
 */
struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	int qnum;

	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
	txq = &sc->sc_txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	/* Try to avoid running out of descriptors */
	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		/* NOTE(review): ATH_DBG_FATAL seems overly severe for an
		 * expected flow-control event — consider ATH_DBG_XMIT. */
		DPRINTF(sc, ATH_DBG_FATAL,
			"TX queue: %d is full, depth: %d\n",
			qnum, txq->axq_depth);
		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}
2004
/* Update parameters for a transmit queue */
/*
 * Applies WMM parameters (AIFS, CW min/max, burst/ready time) to a h/w
 * queue.  The beacon queue is special-cased: its parameters are only
 * cached in sc_beacon_qi and applied later (see comment below).
 * Returns 0 on success, -EIO if the hal rejects the new properties.
 */
int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->sc_bhalq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->sc_beacon_qi = *qinfo;
		return 0;
	}

	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);

	/* merge the caller's values into the queue's current properties */
	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
	}

	return error;
}
2043
2044int ath_cabq_update(struct ath_softc *sc)
2045{
Sujithea9880f2008-08-07 10:53:10 +05302046 struct ath9k_tx_queue_info qi;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002047 int qnum = sc->sc_cabq->axq_qnum;
2048 struct ath_beacon_config conf;
2049
Sujithea9880f2008-08-07 10:53:10 +05302050 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002051 /*
2052 * Ensure the readytime % is within the bounds.
2053 */
2054 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2055 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2056 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2057 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2058
2059 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2060 qi.tqi_readyTime =
2061 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
2062 ath_txq_update(sc, qnum, &qi);
2063
2064 return 0;
2065}
2066
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002067/* Deferred processing of transmit interrupt */
2068
2069void ath_tx_tasklet(struct ath_softc *sc)
2070{
Sujith1fe11322008-08-26 08:11:06 +05302071 int i;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002072 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2073
2074 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2075
2076 /*
2077 * Process each active queue.
2078 */
2079 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2080 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
Sujith1fe11322008-08-26 08:11:06 +05302081 ath_tx_processq(sc, &sc->sc_txq[i]);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002082 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002083}
2084
/*
 * Drain all pending frames from a single transmit queue.
 *
 * Completes every queued buffer: aggregates go through
 * ath_tx_complete_aggr_rifs, singles through ath_tx_complete_buf.
 * Buffers flagged ATH_BUFSTATUS_STALE have already been reaped and are
 * simply returned to the free list.  When @retry_tx is false, frames
 * are marked s/w-aborted so they complete as failed rather than being
 * retried.
 */
void ath_tx_draintxq(struct ath_softc *sc,
		     struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	/*
	 * NB: this assumes output has been stopped and
	 * we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			/* already processed: just recycle the buffer */
			list_del(&bf->list);
			spin_unlock_bh(&txq->axq_lock);

			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
			continue;
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_desc->ds_txstat.ts_flags =
				ATH9K_TX_SW_ABORTED;

		/* remove ath_buf's of the same mpdu from txq */
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		txq->axq_depth--;

		/* completion runs outside the queue lock */
		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
2145
/* Drain the transmit queues and reclaim resources */

void ath_draintxq(struct ath_softc *sc, bool retry_tx)
{
	/* stop beacon queue. The beacon will be freed when
	 * we go to INIT state */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
		/* log where the beacon queue's DMA engine stopped */
		DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
			ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
	}

	/* drain the data queues; @retry_tx is forwarded unchanged */
	ath_drain_txdataq(sc, retry_tx);
}
2160
/* Number of buffers currently queued on h/w queue @qnum */
u32 ath_txq_depth(struct ath_softc *sc, int qnum)
{
	return sc->sc_txq[qnum].axq_depth;
}
2165
/* Number of aggregates currently queued on h/w queue @qnum */
u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
{
	return sc->sc_txq[qnum].axq_aggr_depth;
}
2170
Sujithccc75c52008-10-29 10:18:14 +05302171bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002172{
2173 struct ath_atx_tid *txtid;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002174
Sujith672840a2008-08-11 14:05:08 +05302175 if (!(sc->sc_flags & SC_OP_TXAGGR))
Sujithccc75c52008-10-29 10:18:14 +05302176 return false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002177
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002178 txtid = ATH_AN_2_TID(an, tidno);
2179
Sujitha37c2c72008-10-29 10:15:40 +05302180 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
2181 if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002182 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2183 txtid->addba_exchangeattempts++;
Sujithccc75c52008-10-29 10:18:14 +05302184 return true;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002185 }
2186 }
2187
Sujithccc75c52008-10-29 10:18:14 +05302188 return false;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002189}
2190
2191/* Start TX aggregation */
2192
Sujithb5aa9bf2008-10-29 10:13:31 +05302193int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
2194 u16 tid, u16 *ssn)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002195{
2196 struct ath_atx_tid *txtid;
2197 struct ath_node *an;
2198
Sujithb5aa9bf2008-10-29 10:13:31 +05302199 an = (struct ath_node *)sta->drv_priv;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002200
Sujith672840a2008-08-11 14:05:08 +05302201 if (sc->sc_flags & SC_OP_TXAGGR) {
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002202 txtid = ATH_AN_2_TID(an, tid);
Sujitha37c2c72008-10-29 10:15:40 +05302203 txtid->state |= AGGR_ADDBA_PROGRESS;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002204 ath_tx_pause_tid(sc, txtid);
2205 }
2206
2207 return 0;
2208}
2209
2210/* Stop tx aggregation */
2211
Sujithb5aa9bf2008-10-29 10:13:31 +05302212int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002213{
Sujithb5aa9bf2008-10-29 10:13:31 +05302214 struct ath_node *an = (struct ath_node *)sta->drv_priv;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002215
2216 ath_tx_aggr_teardown(sc, an, tid);
2217 return 0;
2218}
2219
Sujith8469cde2008-10-29 10:19:28 +05302220/* Resume tx aggregation */
2221
2222void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
2223{
2224 struct ath_atx_tid *txtid;
2225 struct ath_node *an;
2226
2227 an = (struct ath_node *)sta->drv_priv;
2228
2229 if (sc->sc_flags & SC_OP_TXAGGR) {
2230 txtid = ATH_AN_2_TID(an, tid);
2231 txtid->baw_size =
2232 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
2233 txtid->state |= AGGR_ADDBA_COMPLETE;
2234 txtid->state &= ~AGGR_ADDBA_PROGRESS;
2235 ath_tx_resume_tid(sc, txtid);
2236 }
2237}
2238
/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 * - Pause the TID and mark cleanup in progress
 * - Discard all retry frames from the s/w queue.
 */

void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		/* no session was established; just reset the attempt count */
		txtid->addba_exchangeattempts = 0;
		return;
	}

	/* TID must be paused first */
	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: it's based on the assumption that
			 * software retried frame will always stay
			 * at the head of software queue.
			 */
			break;
		}
		/* detach the whole mpdu (up to bf_lastfrm) from the queue */
		list_cut_position(&bf_head,
			&txtid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	if (txtid->baw_head != txtid->baw_tail) {
		/* frames still outstanding in the block-ack window:
		 * defer final teardown until the window empties */
		spin_unlock_bh(&txq->axq_lock);
		txtid->state |= AGGR_CLEANUP;
	} else {
		/* window already empty: finish teardown now */
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		txtid->addba_exchangeattempts = 0;
		spin_unlock_bh(&txq->axq_lock);
		ath_tx_flush_tid(sc, txtid);
	}
}
2295
/*
 * Tx scheduling logic
 * NB: must be called with txq lock held
 *
 * Round-robin scheduler: pops the first AC off the queue's AC list,
 * services at most one unpaused TID from it, then re-queues the TID
 * and/or AC if they still have pending work.
 */

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;
	/*
	 * get the first node/ac pair on the queue
	 */
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	/*
	 * process a single tid per destination
	 */
	do {
		/* nothing to schedule */
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)    /* check next tid to keep h/w busy */
			continue;

		/* NOTE(review): aggregates are only scheduled when the
		 * queue depth is even — presumably to bound h/w queue
		 * occupancy; confirm intent before changing. */
		if ((txq->axq_depth % 2) == 0)
			ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only schedule one TID at a time */
		break;
	} while (!list_empty(&ac->tid_q));

	/*
	 * schedule AC if more TIDs need processing
	 */
	if (!list_empty(&ac->tid_q)) {
		/*
		 * add dest ac to txq if not already added
		 */
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}
2358
2359/* Initialize per-node transmit state */
2360
2361void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2362{
Sujithc5170162008-10-29 10:13:59 +05302363 struct ath_atx_tid *tid;
2364 struct ath_atx_ac *ac;
2365 int tidno, acno;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002366
Sujithc5170162008-10-29 10:13:59 +05302367 /*
2368 * Init per tid tx state
2369 */
2370 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2371 tidno < WME_NUM_TID;
2372 tidno++, tid++) {
2373 tid->an = an;
2374 tid->tidno = tidno;
2375 tid->seq_start = tid->seq_next = 0;
2376 tid->baw_size = WME_MAX_BA;
2377 tid->baw_head = tid->baw_tail = 0;
2378 tid->sched = false;
2379 tid->paused = false;
Sujitha37c2c72008-10-29 10:15:40 +05302380 tid->state &= ~AGGR_CLEANUP;
Sujithc5170162008-10-29 10:13:59 +05302381 INIT_LIST_HEAD(&tid->buf_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002382
Sujithc5170162008-10-29 10:13:59 +05302383 acno = TID_TO_WME_AC(tidno);
2384 tid->ac = &an->an_aggr.tx.ac[acno];
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002385
Sujithc5170162008-10-29 10:13:59 +05302386 /* ADDBA state */
Sujitha37c2c72008-10-29 10:15:40 +05302387 tid->state &= ~AGGR_ADDBA_COMPLETE;
2388 tid->state &= ~AGGR_ADDBA_PROGRESS;
2389 tid->addba_exchangeattempts = 0;
Sujithc5170162008-10-29 10:13:59 +05302390 }
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002391
Sujithc5170162008-10-29 10:13:59 +05302392 /*
2393 * Init per ac tx state
2394 */
2395 for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
2396 acno < WME_NUM_AC; acno++, ac++) {
2397 ac->sched = false;
2398 INIT_LIST_HEAD(&ac->tid_q);
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002399
Sujithc5170162008-10-29 10:13:59 +05302400 switch (acno) {
2401 case WME_AC_BE:
2402 ac->qnum = ath_tx_get_qnum(sc,
2403 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2404 break;
2405 case WME_AC_BK:
2406 ac->qnum = ath_tx_get_qnum(sc,
2407 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2408 break;
2409 case WME_AC_VI:
2410 ac->qnum = ath_tx_get_qnum(sc,
2411 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2412 break;
2413 case WME_AC_VO:
2414 ac->qnum = ath_tx_get_qnum(sc,
2415 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2416 break;
Luis R. Rodriguezf078f202008-08-04 00:16:41 -07002417 }
2418 }
2419}
2420
/* Clean up the pending buffers for the node. */
2422
/*
 * Remove this node's ACs/TIDs from every initialized h/w queue's
 * scheduling lists and drain their pending frames.
 */
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];

			spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				/* NOTE(review): list_first_entry never
				 * returns NULL, so the 'tid &&' test is
				 * vacuous; this also assumes tid_q is
				 * non-empty for a scheduled ac — confirm. */
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					/* flush the TID's pending frames */
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->addba_exchangeattempts = 0;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock(&txq->axq_lock);
		}
	}
}
2459
/*
 * Transmit a frame on the CAB (content-after-beacon) queue.
 *
 * Used for buffered multicast/broadcast frames sent after the beacon.
 * Assigns a sequence number when mac80211 asks for it (temporary
 * workaround, see below), pads the 802.11 header to a 4-byte boundary,
 * and hands the frame to ath_tx_start() on sc_cabq.  The skb is freed
 * on any failure.
 */
void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
{
	int hdrlen, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->seq_no += 0x10;	/* seq# field is bits 4-15 */
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
	}

	/* Add the padding after the header if this is not already done */
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		if (skb_headroom(skb) < padsize) {
			DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		/* make room at the front and shift the header up */
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, hdrlen);
	}

	txctl.txq = sc->sc_cabq;

	DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(sc, skb, &txctl) != 0) {
		DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}