/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of transmit path.
 */

#include "core.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_MCS(_rc)	((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2)		/* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
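
/*
 * A worked example of the timing macros above (illustrative only, not
 * used by the code): SYMBOL_TIME(10) = 10 << 2 = 40 us, i.e. ten 4 us
 * symbols with a full guard interval. With a half guard interval a
 * symbol lasts 3.6 us, and SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 =
 * 36 us; the "+ 4" makes the integer division behave as ceil(ns * 3.6).
 */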

#define OFDM_SIFS_TIME		16

static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
	{    52,  108 },	/*  8: BPSK */
	{   104,  216 },	/*  9: QPSK 1/2 */
	{   156,  324 },	/* 10: QPSK 3/4 */
	{   208,  432 },	/* 11: 16-QAM 1/2 */
	{   312,  648 },	/* 12: 16-QAM 3/4 */
	{   416,  864 },	/* 13: 64-QAM 2/3 */
	{   468,  972 },	/* 14: 64-QAM 3/4 */
	{   520, 1080 },	/* 15: 64-QAM 5/6 */
};
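
/*
 * Illustrative use of the table above (an aside, not part of the code):
 * rows 0-7 are single-stream MCS rates, rows 8-15 their two-stream
 * counterparts. Dividing bits-per-symbol by the symbol time gives the
 * PHY data rate, e.g. MCS 7 at 40MHz: 540 bits / 4 us = 135 Mbps, or
 * 540 bits / 3.6 us = 150 Mbps with a half guard interval.
 */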

#define IS_HT_RATE(_rate)	((_rate) & 0x80)

/*
 * Insert a chain of ath_buf (descriptors) on a txq; the caller must have
 * already chained the descriptors together.
 * NB: must be called with txq lock held
 */

static void ath_tx_txqaddbuf(struct ath_softc *sc,
			     struct ath_txq *txq, struct list_head *head)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	list_splice_tail_init(head, &txq->axq_q);
	txq->axq_depth++;
	txq->axq_totalqueued++;
	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: txq depth = %d\n", __func__, txq->axq_depth);

	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: TXDP[%u] = %llx (%p)\n",
			__func__, txq->axq_qnum,
			ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
			__func__,
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);
}

/* Get transmit rate index using rate in Kbps */

static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
{
	int i;
	int ndx = 0;

	for (i = 0; i < rt->rateCount; i++) {
		if (rt->info[i].rateKbps == rate) {
			ndx = i;
			break;
		}
	}

	return ndx;
}

/* Check if it's okay to send out aggregates */

static int ath_aggr_query(struct ath_softc *sc,
			  struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *tid;
	tid = ATH_AN_2_TID(an, tidno);

	if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress)
		return 1;
	else
		return 0;
}

/* Calculate Atheros packet type from IEEE80211 packet header */

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static bool check_min_rate(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	bool use_minrate = false;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
		use_minrate = true;
	} else if (ieee80211_is_data(fc)) {
		if (ieee80211_is_nullfunc(fc) ||
		    /* Port Access Entity (IEEE 802.1X) */
		    (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			use_minrate = true;
		}
	}

	return use_minrate;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}

static void setup_rate_retries(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv;
	struct ath_rc_series *rcs;
	struct ieee80211_hdr *hdr;
	const struct ath9k_rate_table *rt;
	bool use_minrate;
	__le16 fc;
	u8 rix;

	rt = sc->sc_currates;
	BUG_ON(!rt);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif; /* HACK */
	rcs = tx_info_priv->rcs;

	/* Check if min rates have to be used */
	use_minrate = check_min_rate(skb);

	if (ieee80211_is_data(fc) && !use_minrate) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			rcs[0].rix =
				ath_tx_findindex(rt, tx_info_priv->min_rate);
			/* mcast packets are not re-tried */
			rcs[0].tries = 1;
		}
	} else {
		/* for management and control frames,
		   or for NULL and EAPOL frames */
		if (use_minrate)
			rcs[0].rix = ath_rate_findrateix(sc, tx_info_priv->min_rate);
		else
			rcs[0].rix = 0;
		rcs[0].tries = ATH_MGT_TXMAXTRY;
	}

	rix = rcs[0].rix;

	if (ieee80211_has_morefrags(fc) ||
	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
		rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
		rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
		/* reset tries but keep rate index */
		rcs[0].tries = ATH_TXMAXTRY;
	}
}

/* Called only when tx aggregation is enabled and HT is supported */

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* Get tidno */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/* Get seqno */

	if (ieee80211_is_data(fc) && !check_min_rate(skb)) {
		/* For HT capable stations, we save tidno for later use.
		 * We also override seqno set by upper layer with the one
		 * in tx aggregation state.
		 *
		 * If fragmentation is on, the sequence number is
		 * not overridden, since it has been
		 * incremented by the fragmentation routine.
		 *
		 * FIXME: check if the fragmentation threshold exceeds
		 * IEEE80211 max.
		 */
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
					    IEEE80211_SEQ_SEQ_SHIFT);
		bf->bf_seqno = tid->seq_next;
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}
}

static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
			  struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		flags |= ATH9K_TXDESC_RTSENA;

	return flags;
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->sc_txbuflock);

	if (unlikely(list_empty(&sc->sc_txbuf))) {
		spin_unlock_bh(&sc->sc_txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->sc_txbuflock);

	return bf;
}

/* To complete a chain of buffers associated with a frame */

static void ath_tx_complete_buf(struct ath_softc *sc,
				struct ath_buf *bf,
				struct list_head *bf_q,
				int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_xmit_status tx_status;

	/*
	 * Set retry information.
	 * NB: Don't use the information in the descriptor, because the frame
	 * could be software retried.
	 */
	tx_status.retries = bf->bf_retries;
	tx_status.flags = 0;

	if (sendbar)
		tx_status.flags = ATH_TX_BAR;

	if (!txok) {
		tx_status.flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_status.flags |= ATH_TX_XRETRY;
	}
	/* Unmap this frame */
	pci_unmap_single(sc->pdev,
			 bf->bf_dmacontext,
			 skb->len,
			 PCI_DMA_TODEVICE);
	/* complete this frame */
	ath_tx_complete(sc, skb, &tx_status);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_bh(&sc->sc_txbuflock);
	list_splice_tail_init(bf_q, &sc->sc_txbuf);
	spin_unlock_bh(&sc->sc_txbuflock);
}

/*
 * queue up a dest/ac pair for tx scheduling
 * NB: must be called with txq lock held
 */

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	/*
	 * if tid is paused, hold off
	 */
	if (tid->paused)
		return;

	/*
	 * add tid to ac at most once
	 */
	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	/*
	 * add node ac to txq at most once
	 */
	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

/* pause a tid */

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);

	tid->paused++;

	spin_unlock_bh(&txq->axq_lock);
}

/* resume a tid and schedule aggregate */

void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	/*
	 * Add this TID to scheduler and try to send out aggregates
	 */
	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

/* Compute the number of bad frames */

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      int txok)
{
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ATH_DS_BA_SEQ(ds);
		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}
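
/*
 * For illustration (a sketch of the logic above, not extra code): if the
 * block-ack reports a starting sequence of 100 and a subframe carries
 * seqno 103, its bitmap index is 3; the subframe counts as bad when that
 * bit is clear in the block-ack bitmap, or when the whole burst failed.
 */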

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

/* Update block ack window */

static void ath_tx_update_baw(struct ath_softc *sc,
			      struct ath_atx_tid *tid, int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
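
/*
 * Example of the window slide above (illustrative only): completing the
 * oldest outstanding subframe clears tx_buf[baw_head], so the while loop
 * advances seq_start and baw_head past every contiguous completed slot,
 * opening the block-ack window for that many new subframes.
 */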

/*
 * ath_pkt_duration - compute packet duration (NB: not NAV)
 *
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use the 3.6 us (half GI) symbol time instead of 4 us
 */

static u32 ath_pkt_duration(struct ath_softc *sc,
			    u8 rix,
			    struct ath_buf *bf,
			    int width,
			    int half_gi,
			    bool shortPreamble)
{
	const struct ath9k_rate_table *rt = sc->sc_currates;
	u32 nbits, nsymbits, duration, nsymbols;
	u8 rc;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
	rc = rt->info[rix].rateCode;

	/*
	 * for legacy rates, use old function to compute packet duration
	 */
	if (!IS_HT_RATE(rc))
		return ath9k_hw_computetxtime(sc->sc_ah,
					      rt,
					      pktlen,
					      rix,
					      shortPreamble);
	/*
	 * find number of symbols: PLCP + data
	 */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/*
	 * add up duration for legacy/ht training and signal fields
	 */
	streams = HT_RC_2_STREAMS(rc);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
	return duration;
}
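
/*
 * Worked example of the HT branch above (illustrative arithmetic only):
 * a 1500-byte MPDU at MCS 7, 20 MHz, full GI gives
 * nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so nsymbols =
 * ceil(12022 / 260) = 47 and the payload lasts 47 * 4 = 188 us; adding
 * the single-stream training/signal fields (8+8+4+8+4+4 = 36 us) yields
 * a duration of 224 us.
 */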

/* Rate module function to set rate related fields in tx descriptor */

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt;
	struct ath_desc *ds = bf->bf_desc;
	struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
	struct ath9k_11n_rate_series series[4];
	int i, flags, rtsctsena = 0;
	u32 ctsduration = 0;
	u8 rix = 0, cix, ctsrate = 0;
	u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.sta)
		an = (struct ath_node *)tx_info->control.sta->drv_priv;

	/*
	 * get the cix for the lowest valid rix.
	 */
	rt = sc->sc_currates;
	for (i = 4; i--;) {
		if (bf->bf_rcs[i].tries) {
			rix = bf->bf_rcs[i].rix;
			break;
		}
	}
	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
	cix = rt->info[rix].controlRate;

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS. Note that this is only
	 * done for OFDM/HT unicast frames.
	 */
	if (sc->sc_protmode != PROT_M_NONE &&
	    (rt->info[rix].phy == PHY_OFDM ||
	     rt->info[rix].phy == PHY_HT) &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (sc->sc_protmode == PROT_M_RTSCTS)
			flags = ATH9K_TXDESC_RTSENA;
		else if (sc->sc_protmode == PROT_M_CTSONLY)
			flags = ATH9K_TXDESC_CTSENA;

		cix = rt->info[sc->sc_protrix].controlRate;
		rtsctsena = 1;
	}

	/* For 11n, the default behavior is to enable RTS for
	 * hw retried frames. We enable the global flag here and
	 * let rate series flags determine which rates will actually
	 * use RTS.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
		/*
		 * 802.11g protection not needed, use our default behavior
		 */
		if (!rtsctsena)
			flags = ATH9K_TXDESC_RTSENA;
	}

	/*
	 * Set protection if aggregate protection on
	 */
	if (sc->sc_config.ath_aggr_prot &&
	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
		cix = rt->info[sc->sc_protrix].controlRate;
		rtsctsena = 1;
	}

	/*
	 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
	 */
	if (bf_isaggr(bf) && (bf->bf_al > aggr_limit_with_rts)) {
		/*
		 * Ensure that in the case of SM Dynamic power save
		 * while we are bursting the second aggregate the
		 * RTS is cleared.
		 */
		flags &= ~(ATH9K_TXDESC_RTSENA);
	}

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table. We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	BUG_ON(cix == 0xff);
	ctsrate = rt->info[cix].rateCode |
		(bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);

	/*
	 * Setup HAL rate series
	 */
	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	for (i = 0; i < 4; i++) {
		if (!bf->bf_rcs[i].tries)
			continue;

		rix = bf->bf_rcs[i].rix;

		series[i].Rate = rt->info[rix].rateCode |
			(bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);

		series[i].Tries = bf->bf_rcs[i].tries;

		series[i].RateFlags = (
			(bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
				ATH9K_RATESERIES_2040 : 0) |
			((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(
			sc, rix, bf,
			(bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
			(bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
			bf_isshpreamble(bf));

		if (bf_isht(bf))
			series[i].ChSel =
				ath_chainmask_sel_logic(sc, an);
		else
			series[i].ChSel = sc->sc_tx_chainmask;

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/*
	 * For non-HT devices, calculate RTS/CTS duration in software
	 * and disable multi-rate retry.
	 */
	if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
		/*
		 * Compute the transmit duration based on the frame
		 * size and the size of an ACK frame. We call into the
		 * HAL to do the computation since it depends on the
		 * characteristics of the actual PHY being used.
		 *
		 * NB: CTS is assumed the same size as an ACK so we can
		 * use the precalculated ACK durations.
		 */
		if (flags & ATH9K_TXDESC_RTSENA) {	/* SIFS + CTS */
			ctsduration += bf_isshpreamble(bf) ?
				rt->info[cix].spAckDuration :
				rt->info[cix].lpAckDuration;
		}

		ctsduration += series[0].PktDuration;

		if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
			ctsduration += bf_isshpreamble(bf) ?
				rt->info[rix].spAckDuration :
				rt->info[rix].lpAckDuration;
		}

		/*
		 * Disable multi-rate retry when using RTS/CTS by clearing
		 * series 1, 2 and 3.
		 */
		memset(&series[1], 0, sizeof(struct ath9k_11n_rate_series) * 3);
	}

	/*
	 * set dur_update_en for l-sig computation except for PS-Poll frames
	 */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds,
				     !bf_ispspoll(bf),
				     ctsrate,
				     ctsduration,
				     series, 4, flags);
	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}

/*
 * Function to send a normal HT (non-AMPDU) frame
 * NB: must be called with txq lock held
 */

static int ath_tx_send_normal(struct ath_softc *sc,
			      struct ath_txq *txq,
			      struct ath_atx_tid *tid,
			      struct list_head *bf_head)
{
	struct ath_buf *bf;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	/* XXX: HACK! */
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
	memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);

	return 0;
}

/* flush tid's software queue and send frames as non-ampdu's */

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		ASSERT(!bf_isretried(bf));
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

/* Completion routine of an aggregate */

static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
				      struct ath_txq *txq,
				      struct ath_buf *bf,
				      struct list_head *bf_q,
				      int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	struct ath_buf *bf_next, *bf_lastq = NULL;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
	}

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		if (txok) {
			if (ATH_DS_TX_BA(ds)) {
				/*
				 * extract starting sequence and
				 * block-ack bitmap
				 */
				seq_st = ATH_DS_BA_SEQ(ds);
				memcpy(ba,
				       ATH_DS_BA_BITMAP(ds),
				       WME_BA_BMP_SIZE >> 3);
			} else {
				memset(ba, 0, WME_BA_BMP_SIZE >> 3);

				/*
				 * AR5416 can become deaf/mute when a BA
				 * issue happens. The chip needs to be reset,
				 * but AP code may have synchronization issues
				 * when performing an internal reset in this
				 * routine. Only enable reset in STA mode for now.
				 */
				if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
					needreset = 1;
			}
		} else {
			memset(ba, 0, WME_BA_BMP_SIZE >> 3);
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
		} else if (!isaggr && txok) {
			/* transmit completion */
		} else {

			if (!tid->cleanup_inprogress &&
			    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}
		/*
		 * Remove ath_buf's of this sub-frame from aggregate queue.
		 */
		if (bf_next == NULL) {	/* last subframe in the aggregate */
			ASSERT(bf->bf_lastfrm == bf_last);

			/*
			 * The last descriptor of the last sub frame could be
			 * a holding descriptor for h/w. If that's the case,
			 * bf->bf_lastfrm won't be in the bf_q.
			 * Make sure we handle bf_q properly here.
			 */

			if (!list_empty(bf_q)) {
				bf_lastq = list_entry(bf_q->prev,
					struct ath_buf, list);
				list_cut_position(&bf_head,
					bf_q, &bf_lastq->list);
			} else {
				/*
				 * XXX: if the last subframe only has one
				 * descriptor, which is also being used as
				 * a holding descriptor, then the ath_buf
				 * is not in the bf_q at all.
				 */
				INIT_LIST_HEAD(&bf_head);
			}
		} else {
			ASSERT(!list_empty(bf_q));
			list_cut_position(&bf_head,
				bf_q, &bf->bf_lastfrm->list);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* complete this sub-frame */
			ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
		} else {
			/*
			 * retry the un-acked ones
			 */
			/*
			 * XXX: if the last descriptor is a holding descriptor,
			 * in order to requeue the frame to the software queue
			 * we need to allocate a new descriptor and
			 * copy the content of the holding descriptor to it.
			 */
			if (bf->bf_next == NULL &&
			    bf_last->bf_status & ATH_BUFSTATUS_STALE) {
				struct ath_buf *tbf;

				/* allocate new descriptor */
				spin_lock_bh(&sc->sc_txbuflock);
				ASSERT(!list_empty((&sc->sc_txbuf)));
				tbf = list_first_entry(&sc->sc_txbuf,
					struct ath_buf, list);
				list_del(&tbf->list);
				spin_unlock_bh(&sc->sc_txbuflock);

				ATH_TXBUF_RESET(tbf);

				/* copy descriptor content */
				tbf->bf_mpdu = bf_last->bf_mpdu;
				tbf->bf_buf_addr = bf_last->bf_buf_addr;
				*(tbf->bf_desc) = *(bf_last->bf_desc);

				/* link it to the frame */
				if (bf_lastq) {
					bf_lastq->bf_desc->ds_link =
						tbf->bf_daddr;
					bf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						bf->bf_lastfrm->bf_desc);
				} else {
					tbf->bf_state = bf_last->bf_state;
					tbf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						tbf->bf_lastfrm->bf_desc);

					/* copy the DMA context */
					tbf->bf_dmacontext =
						bf_last->bf_dmacontext;
				}
				list_add_tail(&tbf->list, &bf_head);
			} else {
				/*
				 * Clear descriptor status words for
				 * software retry
				 */
				ath9k_hw_cleartxdesc(sc->sc_ah,
					bf->bf_lastfrm->bf_desc);
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->cleanup_inprogress) {
		/* check to see if we're done with cleaning the h/w queue */
		spin_lock_bh(&txq->axq_lock);

		if (tid->baw_head == tid->baw_tail) {
			tid->addba_exchangecomplete = 0;
			tid->addba_exchangeattempts = 0;
			spin_unlock_bh(&txq->axq_lock);

			tid->cleanup_inprogress = false;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		} else
			spin_unlock_bh(&txq->axq_lock);

		return;
	}

	/*
	 * prepend un-acked frames to the beginning of the pending frame queue
	 */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		/* Note: we _prepend_, we _do_not_ add to
		 * the end of the queue ! */
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (needreset)
		ath_reset(sc, false);

	return;
}

/* Process completed xmit descriptors from the specified queue */

static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds, *tmp_ds;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;
	int nacked, txok, nbad = 0, isrifs = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: tx queue %d (%x), link %p\n", __func__,
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	nacked = 0;
	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* FIXME:
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Let's work with the next buffer now */
				bf = list_entry(bf_held->list.next,
					struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;	/* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf_held->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}
		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		/* XXX: HACK! */
		tx_info_priv = (struct ath_tx_info_priv *) tx_info->control.vif;
		if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
			tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
		if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
		    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
			if (ds->ds_txstat.ts_status == 0)
				nacked++;

			if (bf_isdata(bf)) {
				if (isrifs)
					tmp_ds = bf->bf_rifslast->bf_desc;
				else
					tmp_ds = ds;
				memcpy(&tx_info_priv->tx,
				       &tmp_ds->ds_txstat,
				       sizeof(tx_info_priv->tx));
				tx_info_priv->n_frames = bf->bf_nframes;
				tx_info_priv->n_bad_frames = nbad;
			}
		}

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}

		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
	return nacked;
}

static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
		__func__, txq->axq_qnum,
		ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
}

/* Drain only the data queues */

static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;
	int npend = 0;

	/* XXX return value */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->sc_txq[i]);

				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
					sc->sc_txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		int status;

		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: Unable to stop TxDMA. Reset HAL!\n", __func__);

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah,
				    sc->sc_ah->ah_curchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing, true, &status)) {

			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset hardware; hal status %u\n",
				__func__,
				status);
		}
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
	}
}

/* Add a sub-frame to block ack window */

static void ath_tx_addto_baw(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	ASSERT(tid->tx_buf[cindex] == NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
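
/*
 * Indexing example for the function above (illustrative only): with
 * seq_start = 200 and baw_head = 5, a sub-frame with seqno 203 maps to
 * index = 3 and circular slot cindex = (5 + 3) & (ATH_TID_MAX_BUFS - 1)
 * = 8; the tail is pushed past that slot when the new entry lands beyond
 * the current head-to-tail span.
 */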

/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */

static int ath_tx_send_ampdu(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct list_head *bf_head,
			     struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_splice_tail_init(bf_head, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return 0;
	}

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	/* XXX: HACK! */
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
	memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
	return 0;
}

/*
 * Looks up the rate and returns the aggregation limit based on the
 * lowest of the rates.
 */

static u32 ath_lookup_rate(struct ath_softc *sc,
			   struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	const struct ath9k_rate_table *rt = sc->sc_currates;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;
	u32 max_4ms_framelen, frame_length;
	u16 aggr_limit, legacy = 0, maxampdu;
	int i;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	tx_info_priv = (struct ath_tx_info_priv *)
		tx_info->control.vif; /* XXX: HACK! */
	memcpy(bf->bf_rcs,
	       tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (bf->bf_rcs[i].tries) {
			frame_length = bf->bf_rcs[i].max_4ms_framelen;

			if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
				legacy = 1;
				break;
			}

			max_4ms_framelen = min(max_4ms_framelen, frame_length);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate; if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen,
			 (u32)ATH_AMPDU_LIMIT_DEFAULT);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	maxampdu = tid->an->maxampdu;
	if (maxampdu)
		aggr_limit = min(aggr_limit, maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 * Caller should make sure that the rate is an HT rate.
 */

static int ath_compute_num_delims(struct ath_softc *sc,
				  struct ath_atx_tid *tid,
				  struct ath_buf *bf,
				  u16 frmlen)
{
	const struct ath9k_rate_table *rt = sc->sc_currates;
	u32 nsymbits, nsymbols, mpdudensity;
	u16 minlen;
	u8 rc, flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *	The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40MHz and half or full GI.
	 */
	mpdudensity = tid->an->mpdudensity;

	/*
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (mpdudensity == 0)
		return ndelim;

	rix = bf->bf_rcs[0].rix;
	flags = bf->bf_rcs[0].flags;
	rc = rt->info[rix].rateCode;
	width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
	half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Is frame shorter than required minimum length? */
	if (frmlen < minlen) {
		/* Get the minimum number of delimiters required. */
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
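
/*
 * Worked example of the density math above (illustrative only): for a
 * 16 us MPDU density at MCS 7, 20 MHz, full GI, nsymbols =
 * NUM_SYMBOLS_PER_USEC(16) = 4 and nsymbits = 260, so minlen =
 * 4 * 260 / 8 = 130 bytes; a shorter subframe gets extra delimiters to
 * stretch it to that minimum spacing.
 */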

/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0, is_ds_rate = 0;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
			/*
			 * Is rate dual stream
			 */
			is_ds_rate =
				(bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
			(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
				tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);
		nframes++;

		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

#ifdef AGGR_NOSHORT
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf_prev;
	return status;
#undef PADBYTES
}

/*
 * process pending frames possibly doing a-mpdu aggregation
 * NB: must be called with txq lock held
 */

static void ath_tx_sched_aggr(struct ath_softc *sc,
	struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;
	struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
	int prev_frames = 0;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
					  &prev_frames);

		/*
		 * no frames picked up to be aggregated; block-ack
		 * window is not open
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf_last = list_entry(bf_q.prev, struct ath_buf, list);
		bf->bf_lastbf = bf_last;

		/*
		 * if only one frame, send as non-aggregate
		 */
		if (bf->bf_nframes == 1) {
			ASSERT(bf->bf_lastfrm == bf_last);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			/*
			 * clear aggr bits for every descriptor
			 * XXX TODO: is there a way to optimize it?
			 */
			list_for_each_entry(tbf, &bf_q, list) {
				ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
			}

			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/*
		 * setup first desc with rate and aggr info
		 */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/*
		 * anchor last frame of aggregate correctly
		 */
		ASSERT(bf_lastaggr);
		ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
		tbf = bf_lastaggr;
		ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

		/* XXX: We don't enter into this loop, consider removing this */
		while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
			tbf = list_entry(tbf->list.next, struct ath_buf, list);
			ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
		}

		txq->axq_aggr_depth++;

		/*
		 * Normal aggregate, queue to hardware
		 */
		ath_tx_txqaddbuf(sc, txq, &bf_q);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

/* Called with txq lock held */

static void ath_tid_drain(struct ath_softc *sc,
			  struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * do not indicate packets while holding txq spinlock.
		 * unlock is intentional here
		 */
		spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

/*
 * Drain all pending buffers
 * NB: must be called with txq lock held
 */

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
				struct sk_buff *skb, struct scatterlist *sg,
				struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_tx_info_priv *tx_info_priv;
	struct ath_rc_series *rcs;
	int hdrlen;
	__le16 fc;

	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	rcs = tx_info_priv->rcs;

	ATH_TXBUF_RESET(bf);

	/* Frame type */

	bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);

	ieee80211_is_data(fc) ?
		(bf->bf_state.bf_type |= BUF_DATA) :
		(bf->bf_state.bf_type &= ~BUF_DATA);
	ieee80211_is_back_req(fc) ?
		(bf->bf_state.bf_type |= BUF_BAR) :
		(bf->bf_state.bf_type &= ~BUF_BAR);
	ieee80211_is_pspoll(fc) ?
		(bf->bf_state.bf_type |= BUF_PSPOLL) :
		(bf->bf_state.bf_type &= ~BUF_PSPOLL);
	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
	(sc->hw->conf.ht.enabled &&
	 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
		(bf->bf_state.bf_type |= BUF_HT) :
		(bf->bf_state.bf_type &= ~BUF_HT);

	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

	/* Crypto */

	bf->bf_keytype = get_hw_crypto_keytype(skb);

	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	/* Rate series */

	setup_rate_retries(sc, skb);

	bf->bf_rcs[0] = rcs[0];
	bf->bf_rcs[1] = rcs[1];
	bf->bf_rcs[2] = rcs[2];
	bf->bf_rcs[3] = rcs[3];

	/* Assign seqno, tidno */

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	/* DMA setup */

	bf->bf_mpdu = skb;
	bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE);
	bf->bf_buf_addr = bf->bf_dmacontext;
}
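
/*
 * Worked example for the bf_frmlen bookkeeping above (illustrative,
 * assuming the usual ath9k convention that the tx path has already
 * padded the 802.11 header to a 4-byte boundary): a 26-byte QoS data
 * header means (hdrlen & 3) == 2 pad bytes sit inside skb->len and are
 * subtracted back out; with an encrypted frame the hardware-appended
 * ICV (hw_key->icv_len, e.g. 4 bytes for WEP/TKIP) is added, and
 * FCS_LEN accounts for the trailing CRC, yielding the on-air length.
 */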

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct scatterlist *sg, u32 n_sg,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hal *ah = sc->sc_ah;
	int frm_type;

	if (tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
	}

	frm_type = get_hw_packet_type(skb);

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	/* setup descriptor */

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/* Formulate first tx descriptor with tx controls */

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    sg_dma_len(sg),		/* segment length */
			    true,			/* first segment */
			    (n_sg == 1) ? true : false,	/* last segment */
			    ds);			/* first descriptor */

	bf->bf_lastfrm = bf;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR)) {
		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
		}
	} else {
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;

		ath_buf_set_rate(sc, bf);
		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}

int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	struct scatterlist sg;

	/* Check if a tx buffer is available */

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX buffers are full\n",
			__func__);
		return -1;
	}

	ath_tx_setup_buffer(sc, bf, skb, &sg, txctl);

	/* Setup S/G */

	memset(&sg, 0, sizeof(struct scatterlist));
	sg_dma_address(&sg) = bf->bf_dmacontext;
	sg_dma_len(&sg) = skb->len;

	ath_tx_start_dma(sc, bf, &sg, 1, txctl);

	return 0;
}

/* Initialize TX queue and h/w */

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	do {
		spin_lock_init(&sc->sc_txbuflock);

		/* Setup tx descriptors */
		error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
					  "tx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate tx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* XXX allocate beacon state together with vap */
		error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
					  "beacon", ATH_BCBUF, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate "
				"beacon descriptors: %d\n",
				__func__, error);
			break;
		}

	} while (0);

	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
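
/*
 * Sketch of the expected pairing (illustrative): ath_tx_init() runs
 * once at attach time and ath_tx_cleanup() undoes it, including on the
 * partial-failure path handled above:
 *
 *	if (ath_tx_init(sc, ATH_TXBUF) != 0)
 *		goto bad;
 *	...
 *	ath_tx_cleanup(sc);
 */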

/* Reclaim all tx queue resources */

int ath_tx_cleanup(struct ath_softc *sc)
{
	/* cleanup beacon descriptors */
	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);

	/* cleanup tx descriptors */
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);

	return 0;
}

/* Setup a h/w transmit queue */

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL interrupt to reap descriptors. Note that this is
	 * done to reduce interrupt load, and it only defers
	 * reaping descriptors, never transmitting frames. Aside
	 * from reducing interrupts this also permits more
	 * concurrency. The only potential downside is if the tx
	 * queue backs up, in which case the top half of the
	 * kernel may back up due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
				TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: hal qnum %u out of range, max %u!\n",
			__func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->sc_txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_linkbuf = NULL;
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
}

/* Reclaim resources for a setup queue */

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Setup a hardware data transmit queue for the specified
 * access category. The HAL may not support all requested
 * queues, in which case it will return a reference to a
 * previously setup queue. We record the mapping from ACs
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: HAL AC %u out of range, max %zu!\n",
			__func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->sc_haltype2q[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}
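
/*
 * Illustrative attach-time sketch (assumption: mirrors how the driver
 * sets up one data queue per WME access category elsewhere):
 *
 *	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK) ||
 *	    !ath_tx_setup(sc, ATH9K_WME_AC_BE) ||
 *	    !ath_tx_setup(sc, ATH9K_WME_AC_VI) ||
 *	    !ath_tx_setup(sc, ATH9K_WME_AC_VO))
 *		goto bad;
 */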

int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: HAL AC %u out of range, max %zu!\n",
				__func__,
				haltype, ARRAY_SIZE(sc->sc_haltype2q));
			return -1;
		}
		qnum = sc->sc_haltype2q[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->sc_bhalq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->sc_cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return qnum;
}

/* Get a transmit queue, if available */

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	int qnum;

	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
	txq = &sc->sc_txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	/* Try to avoid running out of descriptors */
	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: TX queue: %d is full, depth: %d\n",
			__func__, qnum, txq->axq_depth);
		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}
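
/*
 * Headroom note (descriptive, with an assumed value): the check above
 * stops the mac80211 queue once depth reaches ATH_TXBUF - 20, so with
 * a hypothetical ATH_TXBUF of 512 the queue would stop at depth 492,
 * keeping roughly 20 descriptors in reserve for frames already being
 * processed.
 */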

/* Update parameters for a transmit queue */

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->sc_bhalq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->sc_beacon_qi = *qinfo;
		return 0;
	}

	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to update hardware queue %u!\n",
			__func__, qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
	}

	return error;
}
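
/*
 * Illustrative update sketch (values are examples only): read back the
 * current properties, tweak the contention window, and push the result,
 * following the same read-modify-write pattern ath_cabq_update() uses
 * below:
 *
 *	struct ath9k_tx_queue_info qi;
 *
 *	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
 *	qi.tqi_cwmin = 7;
 *	qi.tqi_cwmax = 63;
 *	ath_txq_update(sc, qnum, &qi);
 */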

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->sc_cabq->axq_qnum;
	struct ath_beacon_config conf;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
	qi.tqi_readyTime =
		(conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
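
/*
 * Worked example for the readytime computation above (illustrative
 * numbers): with a beacon interval of 100 (in beacon-interval units,
 * typically TU) and cabqReadytime clamped to 10 percent,
 * tqi_readyTime = (100 * 10) / 100 = 10, i.e. the CAB queue may drain
 * for 10 such units after each beacon.
 */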

/* Deferred processing of transmit interrupt */

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	/*
	 * Process each active queue.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->sc_txq[i]);
	}
}

void ath_tx_draintxq(struct ath_softc *sc,
		     struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	/*
	 * NB: this assumes output has been stopped and
	 * we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			list_del(&bf->list);
			spin_unlock_bh(&txq->axq_lock);

			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
			continue;
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_desc->ds_txstat.ts_flags =
				ATH9K_TX_SW_ABORTED;

		/* remove ath_buf's of the same mpdu from txq */
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

/* Drain the transmit queues and reclaim resources */

void ath_draintxq(struct ath_softc *sc, bool retry_tx)
{
	/* stop beacon queue. The beacon will be freed when
	 * we go to INIT state */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
		DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
			ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
	}

	ath_drain_txdataq(sc, retry_tx);
}

u32 ath_txq_depth(struct ath_softc *sc, int qnum)
{
	return sc->sc_txq[qnum].axq_depth;
}

u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
{
	return sc->sc_txq[qnum].axq_aggr_depth;
}

/* Check if an ADDBA is required. A valid node must be passed. */
enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
				      struct ath_node *an,
				      u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return AGGR_NOT_REQUIRED;

	/* ADDBA exchange must be completed before sending aggregates */
	txtid = ATH_AN_2_TID(an, tidno);

	if (txtid->addba_exchangecomplete)
		return AGGR_EXCHANGE_DONE;

	if (txtid->cleanup_inprogress)
		return AGGR_CLEANUP_PROGRESS;

	if (txtid->addba_exchangeinprogress)
		return AGGR_EXCHANGE_PROGRESS;

	if (!txtid->addba_exchangecomplete) {
		if (!txtid->addba_exchangeinprogress &&
		    (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
			txtid->addba_exchangeattempts++;
			return AGGR_REQUIRED;
		}
	}

	return AGGR_NOT_REQUIRED;
}
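
/*
 * Illustrative caller sketch (hypothetical; the real caller is the
 * rate-control/tx glue outside this file): kick off an ADDBA exchange
 * only when the check above asks for one:
 *
 *	switch (ath_tx_aggr_check(sc, an, tidno)) {
 *	case AGGR_REQUIRED:
 *		... initiate the ADDBA handshake via mac80211 ...
 *		break;
 *	case AGGR_EXCHANGE_DONE:
 *		... frames for this TID may go out as A-MPDU ...
 *		break;
 *	default:
 *		... send as regular frames for now ...
 *		break;
 *	}
 */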

/* Start TX aggregation */

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->addba_exchangeinprogress = 1;
		ath_tx_pause_tid(sc, txtid);
	}

	return 0;
}

/* Stop tx aggregation */

int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	ath_tx_aggr_teardown(sc, an, tid);
	return 0;
}

/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 * - Pause the TID and mark cleanup in progress
 * - Discard all retry frames from the s/w queue.
 */

void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);

	if (txtid->cleanup_inprogress) /* cleanup is in progress */
		return;

	if (!txtid->addba_exchangecomplete) {
		txtid->addba_exchangeattempts = 0;
		return;
	}

	/* TID must be paused first */
	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: it's based on the assumption that a
			 * software retried frame will always stay
			 * at the head of the software queue.
			 */
			break;
		}
		list_cut_position(&bf_head,
				  &txtid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	if (txtid->baw_head != txtid->baw_tail) {
		spin_unlock_bh(&txq->axq_lock);
		txtid->cleanup_inprogress = true;
	} else {
		txtid->addba_exchangecomplete = 0;
		txtid->addba_exchangeattempts = 0;
		spin_unlock_bh(&txq->axq_lock);
		ath_tx_flush_tid(sc, txtid);
	}
}

/*
 * Tx scheduling logic
 * NB: must be called with txq lock held
 */

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;
	/*
	 * get the first node/ac pair on the queue
	 */
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	/*
	 * process a single tid per destination
	 */
	do {
		/* nothing to schedule */
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused) /* check next tid to keep h/w busy */
			continue;

		if ((txq->axq_depth % 2) == 0)
			ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only schedule one TID at a time */
		break;
	} while (!list_empty(&ac->tid_q));

	/*
	 * schedule AC if more TIDs need processing
	 */
	if (!list_empty(&ac->tid_q)) {
		/*
		 * add dest ac to txq if not already added
		 */
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

/* Initialize per-node transmit state */

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	/*
	 * Init per tid tx state
	 */
	for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->cleanup_inprogress = false;
		INIT_LIST_HEAD(&tid->buf_q);

		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->an_aggr.tx.ac[acno];

		/* ADDBA state */
		tid->addba_exchangecomplete = 0;
		tid->addba_exchangeinprogress = 0;
		tid->addba_exchangeattempts = 0;
	}

	/*
	 * Init per ac tx state
	 */
	for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}
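
/*
 * Worked example of the TID_TO_WME_AC() mapping used above (descriptive
 * note; assumes the conventional 802.11e user-priority folding of
 * 8 TIDs onto 4 access categories):
 *	TID 1, 2 -> WME_AC_BK, TID 0, 3 -> WME_AC_BE,
 *	TID 4, 5 -> WME_AC_VI, TID 6, 7 -> WME_AC_VO
 */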

/* Clean up the pending buffers for the node. */

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];

			spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->addba_exchangecomplete = 0;
					tid->addba_exchangeattempts = 0;
					tid->cleanup_inprogress = false;
				}
			}

			spin_unlock(&txq->axq_lock);
		}
	}
}

void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
{
	int hdrlen, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
	}

	/* Add the padding after the header if this is not already done */
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		if (skb_headroom(skb) < padsize) {
			DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
				"failed\n", __func__);
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, hdrlen);
	}

	txctl.txq = sc->sc_cabq;

	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
		__func__, skb);

	if (ath_tx_start(sc, skb, &txctl) != 0) {
		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}
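
/*
 * Padding example for the header-alignment block above (illustrative):
 * a 26-byte QoS data header has (hdrlen & 3) == 2, so padsize = 2;
 * skb_push() grows the buffer by two bytes at the front and memmove()
 * slides the header back to the start, leaving a two-byte gap between
 * header and payload so the payload begins on a 4-byte boundary.
 */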