FreeBSD kernel ATH device code
if_ath_tx.c
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 * redistribution must be conditioned upon including a substantially
17 * similar Disclaimer requirement for further binary redistribution.
18 *
19 * NO WARRANTY
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGES.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35
36/*
37 * Driver for the Atheros Wireless LAN controller.
38 *
39 * This software is derived from work of Atsushi Onoe; his contribution
40 * is greatly appreciated.
41 */
42
43#include "opt_inet.h"
44#include "opt_ath.h"
45#include "opt_wlan.h"
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/sysctl.h>
50#include <sys/mbuf.h>
51#include <sys/malloc.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/kernel.h>
55#include <sys/socket.h>
56#include <sys/sockio.h>
57#include <sys/errno.h>
58#include <sys/callout.h>
59#include <sys/bus.h>
60#include <sys/endian.h>
61#include <sys/kthread.h>
62#include <sys/taskqueue.h>
63#include <sys/priv.h>
64#include <sys/ktr.h>
65
66#include <machine/bus.h>
67
68#include <net/if.h>
69#include <net/if_var.h>
70#include <net/if_dl.h>
71#include <net/if_media.h>
72#include <net/if_types.h>
73#include <net/if_arp.h>
74#include <net/ethernet.h>
75#include <net/if_llc.h>
76
77#include <net80211/ieee80211_var.h>
78#include <net80211/ieee80211_regdomain.h>
79#ifdef IEEE80211_SUPPORT_SUPERG
80#include <net80211/ieee80211_superg.h>
81#endif
82#ifdef IEEE80211_SUPPORT_TDMA
83#include <net80211/ieee80211_tdma.h>
84#endif
85#include <net80211/ieee80211_ht.h>
86
87#include <net/bpf.h>
88
89#ifdef INET
90#include <netinet/in.h>
91#include <netinet/if_ether.h>
92#endif
93
94#include <dev/ath/if_athvar.h>
95#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
96#include <dev/ath/ath_hal/ah_diagcodes.h>
97
98#include <dev/ath/if_ath_debug.h>
99
100#ifdef ATH_TX99_DIAG
101#include <dev/ath/ath_tx99/ath_tx99.h>
102#endif
103
104#include <dev/ath/if_ath_misc.h>
105#include <dev/ath/if_ath_tx.h>
106#include <dev/ath/if_ath_tx_ht.h>
107
108#ifdef ATH_DEBUG_ALQ
109#include <dev/ath/if_ath_alq.h>
110#endif
111
112/*
113 * How many retries to perform in software
114 */
115#define SWMAX_RETRIES 10
116
117/*
118 * What queue to throw the non-QoS TID traffic into
119 */
120#define ATH_NONQOS_TID_AC WME_AC_VO
121
122#if 0
123static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
124#endif
125static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
126 int tid);
127static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
128 int tid);
129static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
130 struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
131static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
132 struct ieee80211_node *ni, struct mbuf *m0, int *tid);
133static struct ath_buf *
134ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
135 struct ath_tid *tid, struct ath_buf *bf);
136
137#ifdef ATH_DEBUG_ALQ
138void
139ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
140{
141 struct ath_buf *bf;
142 int i, n;
143 const char *ds;
144
145 /* XXX we should skip out early if debugging isn't enabled! */
146 bf = bf_first;
147
148 while (bf != NULL) {
149 /* XXX should ensure bf_nseg > 0! */
150 if (bf->bf_nseg == 0)
151 break;
152 n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
153 for (i = 0, ds = (const char *) bf->bf_desc;
154 i < n;
155 i++, ds += sc->sc_tx_desclen) {
156 if_ath_alq_post(&sc->sc_alq,
157 ATH_ALQ_EDMA_TXDESC,
158 sc->sc_tx_desclen,
159 ds);
160 }
161 bf = bf->bf_next;
162 }
163}
164#endif /* ATH_DEBUG_ALQ */
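A quick worked example of the descriptor-count arithmetic above, with hypothetical values: a six-segment mbuf chain needs two descriptors on an EDMA chip (four buffer pointers per descriptor) but six on a legacy chip (one pointer per descriptor).

/* Illustrative only; nseg and the maps-per-descriptor counts are made up. */
int nseg = 6;
int n_edma   = ((nseg - 1) / 4) + 1;    /* 2 descriptors at 4 maps each */
int n_legacy = ((nseg - 1) / 1) + 1;    /* 6 descriptors at 1 map each */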
165
166/*
167 * Whether to use the 11n rate scenario functions or not
168 */
169static inline int
170ath_tx_is_11n(struct ath_softc *sc)
171{
172 return ((sc->sc_ah->ah_magic == 0x20065416) ||
173 (sc->sc_ah->ah_magic == 0x19741014));
174}
175
176/*
177 * Obtain the current TID from the given frame.
178 *
179 * Non-QoS frames get mapped to a TID so frames consistently
180 * go on a sensible queue.
181 */
182static int
183ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
184{
185 const struct ieee80211_frame *wh;
186
187 wh = mtod(m0, const struct ieee80211_frame *);
188
189 /* Non-QoS: map frame to a TID queue for software queueing */
190 if (! IEEE80211_QOS_HAS_SEQ(wh))
191 return (WME_AC_TO_TID(M_WME_GETAC(m0)));
192
193 /* QoS - fetch the TID from the header, ignore mbuf WME */
194 return (ieee80211_gettid(wh));
195}
196
197static void
198ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
199{
200 struct ieee80211_frame *wh;
201
202 wh = mtod(bf->bf_m, struct ieee80211_frame *);
203 /* Only update/resync if needed */
204 if (bf->bf_state.bfs_isretried == 0) {
205 wh->i_fc[1] |= IEEE80211_FC1_RETRY;
206 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
207 BUS_DMASYNC_PREWRITE);
208 }
209 bf->bf_state.bfs_isretried = 1;
210 bf->bf_state.bfs_retries ++;
211}
212
213/*
214 * Determine what the correct AC queue for the given frame
215 * should be.
216 *
217 * For QoS frames, obey the TID. That way things like
218 * management frames that are related to a given TID
219 * are thus serialised with the rest of the TID traffic,
220 * regardless of net80211 overriding priority.
221 *
222 * For non-QoS frames, return the mbuf WME priority.
223 *
224 * This has implications that higher priority non-QoS traffic
225 * may end up being scheduled before other non-QoS traffic,
226 * leading to out-of-sequence packets being emitted.
227 *
228 * (It'd be nice to log/count this so we can see if it
229 * really is a problem.)
230 *
231 * TODO: maybe we should throw multicast traffic, QoS or
232 * otherwise, into a separate TX queue?
233 */
234static int
235ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
236{
237 const struct ieee80211_frame *wh;
238
239 wh = mtod(m0, const struct ieee80211_frame *);
240
241 /*
242 * QoS data frame (sequence number or otherwise) -
243 * return hardware queue mapping for the underlying
244 * TID.
245 */
246 if (IEEE80211_QOS_HAS_SEQ(wh))
247 return TID_TO_WME_AC(ieee80211_gettid(wh));
248
249 /*
250 * Otherwise - return mbuf QoS pri.
251 */
252 return (M_WME_GETAC(m0));
253}
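Taken together, the two helpers above give the TID/AC classification used later in ath_tx_start(); a minimal sketch of the calling pattern (sc and m0 assumed to be in scope):

int pri = ath_tx_getac(sc, m0);           /* WME access category */
int tid = ath_tx_gettid(sc, m0);          /* software TID queue index */
struct ath_txq *txq = sc->sc_ac2q[pri];   /* destination hardware TXQ */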
254
255void
256ath_txfrag_cleanup(struct ath_softc *sc,
257 ath_bufhead *frags, struct ieee80211_node *ni)
258{
259 struct ath_buf *bf, *next;
260
261 ATH_TXBUF_LOCK_ASSERT(sc);
262
263 TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
264 /* NB: bf assumed clean */
265 TAILQ_REMOVE(frags, bf, bf_list);
266 ath_returnbuf_head(sc, bf);
267 ieee80211_node_decref(ni);
268 }
269}
270
271/*
272 * Setup xmit of a fragmented frame. Allocate a buffer
273 * for each frag and bump the node reference count to
274 * reflect the held reference to be setup by ath_tx_start.
275 */
276int
277ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
278 struct mbuf *m0, struct ieee80211_node *ni)
279{
280 struct mbuf *m;
281 struct ath_buf *bf;
282
283 ATH_TXBUF_LOCK(sc);
284 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
285 /* XXX non-management? */
286 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
287 if (bf == NULL) { /* out of buffers, cleanup */
288 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
289 __func__);
290 ath_txfrag_cleanup(sc, frags, ni);
291 break;
292 }
293 ieee80211_node_incref(ni);
294 TAILQ_INSERT_TAIL(frags, bf, bf_list);
295 }
296 ATH_TXBUF_UNLOCK(sc);
297
298 return !TAILQ_EMPTY(frags);
299}
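A hedged sketch of how a transmit path might pair the two fragment helpers above; one ath_buf is reserved per fragment up front, and ath_txfrag_setup() already undoes the node references itself if it runs out of buffers:

ath_bufhead frags;

TAILQ_INIT(&frags);
if ((m0->m_flags & M_FRAG) && !ath_txfrag_setup(sc, &frags, m0, ni)) {
        /* couldn't reserve an ath_buf per fragment; drop the chain */
        ieee80211_free_mbuf(m0);
        return (ENOBUFS);
}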
300
301static int
302ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
303{
304 struct mbuf *m;
305 int error;
306
307 /*
308 * Load the DMA map so any coalescing is done. This
309 * also calculates the number of descriptors we need.
310 */
311 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
312 bf->bf_segs, &bf->bf_nseg,
313 BUS_DMA_NOWAIT);
314 if (error == EFBIG) {
315 /* XXX packet requires too many descriptors */
316 bf->bf_nseg = ATH_MAX_SCATTER + 1;
317 } else if (error != 0) {
318 sc->sc_stats.ast_tx_busdma++;
319 ieee80211_free_mbuf(m0);
320 return error;
321 }
322 /*
323 * Discard null packets and check for packets that
324 * require too many TX descriptors. We try to convert
325 * the latter to a cluster.
326 */
327 if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */
328 sc->sc_stats.ast_tx_linear++;
329 m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
330 if (m == NULL) {
331 ieee80211_free_mbuf(m0);
332 sc->sc_stats.ast_tx_nombuf++; /* XXX */
333 return ENOMEM;
334 }
335 m0 = m;
336 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
337 bf->bf_segs, &bf->bf_nseg,
338 BUS_DMA_NOWAIT);
339 if (error != 0) {
340 sc->sc_stats.ast_tx_busdma++;
341 ieee80211_free_mbuf(m0);
342 return error;
343 }
344 KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
345 ("too many segments after defrag; nseg %u", bf->bf_nseg));
346 } else if (bf->bf_nseg == 0) { /* null packet, discard */
347 sc->sc_stats.ast_tx_nodata++;
348 ieee80211_free_mbuf(m0);
349 return EIO;
350 }
351 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
352 __func__, m0, m0->m_pkthdr.len);
353 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
354 bf->bf_m = m0;
355
356 return 0;
357}
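Callers of ath_tx_dmasetup() must re-fetch the mbuf afterwards because m_collapse() may have replaced it; a minimal sketch of the calling pattern used later in this file:

error = ath_tx_dmasetup(sc, bf, m0);
if (error != 0)
        return (error);         /* mbuf has already been freed on error */
m0 = bf->bf_m;                  /* NB: may have changed */
wh = mtod(m0, struct ieee80211_frame *);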
358
359/*
360 * Chain together segments+descriptors for a frame - 11n or otherwise.
361 *
362 * For aggregates, this is called on each frame in the aggregate.
363 */
364static void
365ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
366 struct ath_buf *bf, bool is_aggr, int is_first_subframe,
367 int is_last_subframe)
368{
369 struct ath_hal *ah = sc->sc_ah;
370 char *ds;
371 int i, bp, dsp;
372 HAL_DMA_ADDR bufAddrList[4];
373 uint32_t segLenList[4];
374 int numTxMaps = 1;
375 int isFirstDesc = 1;
376
377 /*
378 * XXX There's txdma and txdma_mgmt; the descriptor
379 * sizes must match.
380 */
381 struct ath_descdma *dd = &sc->sc_txdma;
382
383 /*
384 * Fillin the remainder of the descriptor info.
385 */
386
387 /*
388 * We need the number of TX data pointers in each descriptor.
389 * EDMA and later chips support 4 TX buffers per descriptor;
390 * previous chips just support one.
391 */
392 numTxMaps = sc->sc_tx_nmaps;
393
394 /*
395 * For EDMA and later chips ensure the TX map is fully populated
396 * before advancing to the next descriptor.
397 */
398 ds = (char *) bf->bf_desc;
399 bp = dsp = 0;
400 bzero(bufAddrList, sizeof(bufAddrList));
401 bzero(segLenList, sizeof(segLenList));
402 for (i = 0; i < bf->bf_nseg; i++) {
403 bufAddrList[bp] = bf->bf_segs[i].ds_addr;
404 segLenList[bp] = bf->bf_segs[i].ds_len;
405 bp++;
406
407 /*
408 * Go to the next segment if this isn't the last segment
409 * and there's space in the current TX map.
410 */
411 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
412 continue;
413
414 /*
415 * Last segment or we're out of buffer pointers.
416 */
417 bp = 0;
418
419 if (i == bf->bf_nseg - 1)
420 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
421 else
422 ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
423 bf->bf_daddr + dd->dd_descsize * (dsp + 1));
424
425 /*
426 * XXX This assumes that bfs_txq is the actual destination
427 * hardware queue at this point. It may not have been
428 * assigned, it may actually be pointing to the multicast
429 * software TXQ id. These must be fixed!
430 */
431 ath_hal_filltxdesc(ah, (struct ath_desc *) ds
432 , bufAddrList
433 , segLenList
434 , bf->bf_descid /* XXX desc id */
435 , bf->bf_state.bfs_tx_queue
436 , isFirstDesc /* first segment */
437 , i == bf->bf_nseg - 1 /* last segment */
438 , (struct ath_desc *) ds0 /* first descriptor */
439 );
440
441 /*
442 * Make sure the 11n aggregate fields are cleared.
443 *
444 * XXX TODO: this doesn't need to be called for
445 * aggregate frames; as it'll be called on all
446 * sub-frames. Since the descriptors are in
447 * non-cacheable memory, this leads to some
448 * rather slow writes on MIPS/ARM platforms.
449 */
450 if (ath_tx_is_11n(sc))
451 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
452
453 /*
454 * If 11n is enabled, set it up as if it's an aggregate
455 * frame.
456 */
457 if (is_last_subframe) {
458 ath_hal_set11n_aggr_last(sc->sc_ah,
459 (struct ath_desc *) ds);
460 } else if (is_aggr) {
461 /*
462 * This clears the aggrlen field; so
463 * the caller needs to call set_aggr_first()!
464 *
465 * XXX TODO: don't call this for the first
466 * descriptor in the first frame in an
467 * aggregate!
468 */
469 ath_hal_set11n_aggr_middle(sc->sc_ah,
470 (struct ath_desc *) ds,
471 bf->bf_state.bfs_ndelim);
472 }
473 isFirstDesc = 0;
474 bf->bf_lastds = (struct ath_desc *) ds;
475
476 /*
477 * Don't forget to skip to the next descriptor.
478 */
479 ds += sc->sc_tx_desclen;
480 dsp++;
481
482 /*
483 * .. and don't forget to blank these out!
484 */
485 bzero(bufAddrList, sizeof(bufAddrList));
486 bzero(segLenList, sizeof(segLenList));
487 }
488 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
489}
490
491/*
492 * Set the rate control fields in the given descriptor based on
493 * the bf_state fields and node state.
494 *
495 * The bfs fields should already be set with the relevant rate
496 * control information, including whether MRR is to be enabled.
497 *
498 * Since the FreeBSD HAL currently sets up the first TX rate
499 * in ath_hal_setuptxdesc(), this will setup the MRR
500 * conditionally for the pre-11n chips, and call ath_buf_set_rate
501 * unconditionally for 11n chips. These require the 11n rate
502 * scenario to be set if MCS rates are enabled, so it's easier
503 * to just always call it. The caller can then only set rates 2, 3
504 * and 4 if multi-rate retry is needed.
505 */
506static void
507ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
508 struct ath_buf *bf)
509{
510 struct ath_rc_series *rc = bf->bf_state.bfs_rc;
511
512 /* If mrr is disabled, blank tries 1, 2, 3 */
513 if (! bf->bf_state.bfs_ismrr)
514 rc[1].tries = rc[2].tries = rc[3].tries = 0;
515
516#if 0
517 /*
518 * If NOACK is set, just set ntries=1.
519 */
520 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
521 rc[1].tries = rc[2].tries = rc[3].tries = 0;
522 rc[0].tries = 1;
523 }
524#endif
525
526 /*
527 * Always call - that way a retried descriptor will
528 * have the MRR fields overwritten.
529 *
530 * XXX TODO: see if this is really needed - setting up
531 * the first descriptor should set the MRR fields to 0
532 * for us anyway.
533 */
534 if (ath_tx_is_11n(sc)) {
535 ath_buf_set_rate(sc, ni, bf);
536 } else {
537 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
538 , rc[1].ratecode, rc[1].tries
539 , rc[2].ratecode, rc[2].tries
540 , rc[3].ratecode, rc[3].tries
541 );
542 }
543}
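For illustration, a hypothetical multi-rate retry ladder as it might sit in bf->bf_state.bfs_rc before the call above; series 0 comes from the rate lookup and the later series are the fallbacks that get blanked when MRR is disabled (the rate codes are made up):

bf->bf_state.bfs_rc[0].ratecode = 0x87;  /* e.g. an HT MCS rate code */
bf->bf_state.bfs_rc[0].tries    = 4;
bf->bf_state.bfs_rc[1].ratecode = 0x84;  /* slower fallback */
bf->bf_state.bfs_rc[1].tries    = 4;
bf->bf_state.bfs_rc[2].tries    = 0;     /* unused series */
bf->bf_state.bfs_rc[3].tries    = 0;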
544
545/*
546 * Setup segments+descriptors for an 11n aggregate.
547 * bf_first is the first buffer in the aggregate.
548 * The descriptor list must already been linked together using
549 * bf->bf_next.
550 */
551static void
552ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
553{
554 struct ath_buf *bf, *bf_prev = NULL;
555 struct ath_desc *ds0 = bf_first->bf_desc;
556
557 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
558 __func__, bf_first->bf_state.bfs_nframes,
559 bf_first->bf_state.bfs_al);
560
561 bf = bf_first;
562
563 if (bf->bf_state.bfs_txrate0 == 0)
564 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
565 __func__, bf, 0);
566 if (bf->bf_state.bfs_rc[0].ratecode == 0)
567 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
568 __func__, bf, 0);
569
570 /*
571 * Setup all descriptors of all subframes - this will
572 * call ath_hal_set11naggrmiddle() on every frame.
573 */
574 while (bf != NULL) {
575 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
576 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
577 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
578 SEQNO(bf->bf_state.bfs_seqno));
579
580 /*
581 * Setup the initial fields for the first descriptor - all
582 * the non-11n specific stuff.
583 */
584 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
585 , bf->bf_state.bfs_pktlen /* packet length */
586 , bf->bf_state.bfs_hdrlen /* header length */
587 , bf->bf_state.bfs_atype /* Atheros packet type */
588 , bf->bf_state.bfs_txpower /* txpower */
589 , bf->bf_state.bfs_txrate0
590 , bf->bf_state.bfs_try0 /* series 0 rate/tries */
591 , bf->bf_state.bfs_keyix /* key cache index */
592 , bf->bf_state.bfs_txantenna /* antenna mode */
593 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */
594 , bf->bf_state.bfs_ctsrate /* rts/cts rate */
595 , bf->bf_state.bfs_ctsduration /* rts/cts duration */
596 );
597
598 /*
599 * First descriptor? Setup the rate control and initial
600 * aggregate header information.
601 */
602 if (bf == bf_first) {
603 /*
604 * setup first desc with rate and aggr info
605 */
606 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
607 }
608
609 /*
610 * Setup the descriptors for a multi-descriptor frame.
611 * This is both aggregate and non-aggregate aware.
612 */
613 ath_tx_chaindesclist(sc, ds0, bf,
614 1, /* is_aggr */
615 !! (bf == bf_first), /* is_first_subframe */
616 !! (bf->bf_next == NULL) /* is_last_subframe */
617 );
618
619 if (bf == bf_first) {
620 /*
621 * Initialise the first 11n aggregate with the
622 * aggregate length and aggregate enable bits.
623 */
624 ath_hal_set11n_aggr_first(sc->sc_ah,
625 ds0,
626 bf->bf_state.bfs_al,
627 bf->bf_state.bfs_ndelim);
628 }
629
630 /*
631 * Link the last descriptor of the previous frame
632 * to the beginning descriptor of this frame.
633 */
634 if (bf_prev != NULL)
635 ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
636 bf->bf_daddr);
637
638 /* Save a copy so we can link the next descriptor in */
639 bf_prev = bf;
640 bf = bf->bf_next;
641 }
642
643 /*
644 * Set the first descriptor bf_lastds field to point to
645 * the last descriptor in the last subframe, that's where
646 * the status update will occur.
647 */
648 bf_first->bf_lastds = bf_prev->bf_lastds;
649
650 /*
651 * And bf_last in the first descriptor points to the end of
652 * the aggregate list.
653 */
654 bf_first->bf_last = bf_prev;
655
656 /*
657 * For non-AR9300 NICs, which require the rate control
658 * in the final descriptor - let's set that up now.
659 *
660 * This is because the filltxdesc() HAL call doesn't
661 * populate the last segment with rate control information
662 * if firstSeg is also true. For non-aggregate frames
663 * that is fine, as the first frame already has rate control
664 * info. But if the last frame in an aggregate has one
665 * descriptor, both firstseg and lastseg will be true and
666 * the rate info isn't copied.
667 *
668 * This is inefficient on MIPS/ARM platforms that have
669 * non-cachable memory for TX descriptors, but we'll just
670 * make do for now.
671 *
672 * As to why the rate table is stashed in the last descriptor
673 * rather than the first descriptor? Because proctxdesc()
674 * is called on the final descriptor in an MPDU or A-MPDU -
675 * ie, the one that gets updated by the hardware upon
676 * completion. That way proctxdesc() doesn't need to know
677 * about the first _and_ last TX descriptor.
678 */
679 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
680
681 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
682}
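To make the resulting layout concrete, a small illustrative walk over an aggregate after the function above has run: subframes hang off bf_next, bf_first->bf_last points at the final ath_buf, and bf_first->bf_lastds points at that subframe's last descriptor (the one the hardware updates on completion).

struct ath_buf *bf;
int nframes = 0;

for (bf = bf_first; bf != NULL; bf = bf->bf_next)
        nframes++;      /* visits every subframe exactly once */
/* nframes should equal bf_first->bf_state.bfs_nframes here */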
683
684/*
685 * Hand-off a frame to the multicast TX queue.
686 *
687 * This is a software TXQ which will be appended to the CAB queue
688 * during the beacon setup code.
689 *
690 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
691 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
692 * with the actual hardware txq, or all of this will fall apart.
693 *
694 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
695 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
696 * correctly.
697 */
698static void
699ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
700 struct ath_buf *bf)
701{
702 ATH_TX_LOCK_ASSERT(sc);
703
704 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
705 ("%s: busy status 0x%x", __func__, bf->bf_flags));
706
707 /*
708 * Ensure that the tx queue is the cabq, so things get
709 * mapped correctly.
710 */
711 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
712 DPRINTF(sc, ATH_DEBUG_XMIT,
713 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
714 __func__, bf, bf->bf_state.bfs_tx_queue,
715 txq->axq_qnum);
716 }
717
718 ATH_TXQ_LOCK(txq);
719 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
720 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
721 struct ieee80211_frame *wh;
722
723 /* mark previous frame */
724 wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
725 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
726 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
727 BUS_DMASYNC_PREWRITE);
728
729 /* link descriptor */
730 ath_hal_settxdesclink(sc->sc_ah,
731 bf_last->bf_lastds,
732 bf->bf_daddr);
733 }
734 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
735 ATH_TXQ_UNLOCK(txq);
736}
737
738/*
739 * Hand-off packet to a hardware queue.
740 */
741static void
742ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
743 struct ath_buf *bf)
744{
745 struct ath_hal *ah = sc->sc_ah;
746 struct ath_buf *bf_first;
747
748 /*
749 * Insert the frame on the outbound list and pass it on
750 * to the hardware. Multicast frames buffered for power
751 * save stations and transmit from the CAB queue are stored
752 * on a s/w only queue and loaded on to the CAB queue in
753 * the SWBA handler since frames only go out on DTIM and
754 * to avoid possible races.
755 */
756 ATH_TX_LOCK_ASSERT(sc);
757 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
758 ("%s: busy status 0x%x", __func__, bf->bf_flags));
759 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
760 ("ath_tx_handoff_hw called for mcast queue"));
761
762 /*
763 * XXX We should instead just verify that sc_txstart_cnt
764 * or ath_txproc_cnt > 0. That would mean that
765 * the reset is going to be waiting for us to complete.
766 */
767 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
768 device_printf(sc->sc_dev,
769 "%s: TX dispatch without holding txcount/txstart refcnt!\n",
770 __func__);
771 }
772
773 /*
774 * XXX .. this is going to cause the hardware to get upset;
775 * so we really should find some way to drop or queue
776 * things.
777 */
778
779 ATH_TXQ_LOCK(txq);
780
781 /*
782 * XXX TODO: if there's a holdingbf, then
783 * ATH_TXQ_PUTRUNNING should be clear.
784 *
785 * If there is a holdingbf and the list is empty,
786 * then axq_link should be pointing to the holdingbf.
787 *
788 * Otherwise it should point to the last descriptor
789 * in the last ath_buf.
790 *
791 * In any case, we should really ensure that we
792 * update the previous descriptor link pointer to
793 * this descriptor, regardless of all of the above state.
794 *
795 * For now this is captured by having axq_link point
796 * to either the holdingbf (if the TXQ list is empty)
797 * or the end of the list (if the TXQ list isn't empty.)
798 * I'd rather just kill axq_link here and do it as above.
799 */
800
801 /*
802 * Append the frame to the TX queue.
803 */
804 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
805 ATH_KTR(sc, ATH_KTR_TX, 3,
806 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
807 "depth=%d",
808 txq->axq_qnum,
809 bf,
810 txq->axq_depth);
811
812 /*
813 * If there's a link pointer, update it.
814 *
815 * XXX we should replace this with the above logic, just
816 * to kill axq_link with fire.
817 */
818 if (txq->axq_link != NULL) {
819 *txq->axq_link = bf->bf_daddr;
820 DPRINTF(sc, ATH_DEBUG_XMIT,
821 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
822 txq->axq_qnum, txq->axq_link,
823 (caddr_t)bf->bf_daddr, bf->bf_desc,
824 txq->axq_depth);
825 ATH_KTR(sc, ATH_KTR_TX, 5,
826 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
827 "lastds=%d",
828 txq->axq_qnum, txq->axq_link,
829 (caddr_t)bf->bf_daddr, bf->bf_desc,
830 bf->bf_lastds);
831 }
832
833 /*
834 * If we've not pushed anything into the hardware yet,
835 * push the head of the queue into the TxDP.
836 *
837 * Once we've started DMA, there's no guarantee that
838 * updating the TxDP with a new value will actually work.
839 * So we just don't do that - if we hit the end of the list,
840 * we keep that buffer around (the "holding buffer") and
841 * re-start DMA by updating the link pointer of _that_
842 * descriptor and then restart DMA.
843 */
844 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
845 bf_first = TAILQ_FIRST(&txq->axq_q);
846 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
847 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
848 DPRINTF(sc, ATH_DEBUG_XMIT,
849 "%s: TXDP[%u] = %p (%p) depth %d\n",
850 __func__, txq->axq_qnum,
851 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
852 txq->axq_depth);
853 ATH_KTR(sc, ATH_KTR_TX, 5,
854 "ath_tx_handoff: TXDP[%u] = %p (%p) "
855 "lastds=%p depth %d",
856 txq->axq_qnum,
857 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
858 bf_first->bf_lastds,
859 txq->axq_depth);
860 }
861
862 /*
863 * Ensure that the bf TXQ matches this TXQ, so later
864 * checking and holding buffer manipulation is sane.
865 */
866 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
867 DPRINTF(sc, ATH_DEBUG_XMIT,
868 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
869 __func__, bf, bf->bf_state.bfs_tx_queue,
870 txq->axq_qnum);
871 }
872
873 /*
874 * Track aggregate queue depth.
875 */
876 if (bf->bf_state.bfs_aggr)
877 txq->axq_aggr_depth++;
878
879 /*
880 * Update the link pointer.
881 */
882 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
883
884 /*
885 * Start DMA.
886 *
887 * If we wrote a TxDP above, DMA will start from here.
888 *
889 * If DMA is running, it'll do nothing.
890 *
891 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
892 * or VEOL) then it stops at the last transmitted write.
893 * We then append a new frame by updating the link pointer
894 * in that descriptor and then kick TxE here; it will re-read
895 * that last descriptor and find the new descriptor to transmit.
896 *
897 * This is why we keep the holding descriptor around.
898 */
899 ath_hal_txstart(ah, txq->axq_qnum);
900 ATH_TXQ_UNLOCK(txq);
901 ATH_KTR(sc, ATH_KTR_TX, 1,
902 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
903}
904
905/*
906 * Restart TX DMA for the given TXQ.
907 *
908 * This must be called whether the queue is empty or not.
909 */
910static void
911ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
912{
913 struct ath_buf *bf, *bf_last;
914
915 ATH_TXQ_LOCK_ASSERT(txq);
916
917 /* XXX make this ATH_TXQ_FIRST */
918 bf = TAILQ_FIRST(&txq->axq_q);
919 bf_last = ATH_TXQ_LAST(txq, axq_q_s);
920
921 if (bf == NULL)
922 return;
923
924 DPRINTF(sc, ATH_DEBUG_RESET,
925 "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
926 __func__,
927 txq->axq_qnum,
928 bf,
929 bf_last,
930 (uint32_t) bf->bf_daddr);
931
932#ifdef ATH_DEBUG
933 if (sc->sc_debug & ATH_DEBUG_RESET)
934 ath_tx_dump(sc, txq);
935#endif
936
937 /*
938 * This is called from a restart, so DMA is known to be
939 * completely stopped.
940 */
941 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
942 ("%s: Q%d: called with PUTRUNNING=1\n",
943 __func__,
944 txq->axq_qnum));
945
946 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
947 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
948
949 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
950 &txq->axq_link);
951 ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
952}
953
954/*
955 * Hand off a packet to the hardware (or mcast queue.)
956 *
957 * The relevant hardware txq should be locked.
958 */
959static void
960ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
961 struct ath_buf *bf)
962{
963 ATH_TX_LOCK_ASSERT(sc);
964
965#ifdef ATH_DEBUG_ALQ
966 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
967 ath_tx_alq_post(sc, bf);
968#endif
969
970 if (txq->axq_qnum == ATH_TXQ_SWQ)
971 ath_tx_handoff_mcast(sc, txq, bf);
972 else
973 ath_tx_handoff_hw(sc, txq, bf);
974}
975
976static int
977ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
978 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
979 int *keyix)
980{
981 DPRINTF(sc, ATH_DEBUG_XMIT,
982 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
983 __func__,
984 *hdrlen,
985 *pktlen,
986 isfrag,
987 iswep,
988 m0);
989
990 if (iswep) {
991 const struct ieee80211_cipher *cip;
992 struct ieee80211_key *k;
993
994 /*
995 * Construct the 802.11 header+trailer for an encrypted
996 * frame. The only reason this can fail is because of an
997 * unknown or unsupported cipher/key type.
998 */
999 k = ieee80211_crypto_encap(ni, m0);
1000 if (k == NULL) {
1001 /*
1002 * This can happen when the key is yanked after the
1003 * frame was queued. Just discard the frame; the
1004 * 802.11 layer counts failures and provides
1005 * debugging/diagnostics.
1006 */
1007 return (0);
1008 }
1009 /*
1010 * Adjust the packet + header lengths for the crypto
1011 * additions and calculate the h/w key index. When
1012 * a s/w mic is done the frame will have had any mic
1013 * added to it prior to entry so m0->m_pkthdr.len will
1014 * account for it. Otherwise we need to add it to the
1015 * packet length.
1016 */
1017 cip = k->wk_cipher;
1018 (*hdrlen) += cip->ic_header;
1019 (*pktlen) += cip->ic_header + cip->ic_trailer;
1020 /* NB: frags always have any TKIP MIC done in s/w */
1021 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
1022 (*pktlen) += cip->ic_miclen;
1023 (*keyix) = k->wk_keyix;
1024 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
1025 /*
1026 * Use station key cache slot, if assigned.
1027 */
1028 (*keyix) = ni->ni_ucastkey.wk_keyix;
1029 if ((*keyix) == IEEE80211_KEYIX_NONE)
1030 (*keyix) = HAL_TXKEYIX_INVALID;
1031 } else
1032 (*keyix) = HAL_TXKEYIX_INVALID;
1033
1034 return (1);
1035}
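A minimal calling sketch for the helper above, as the normal setup path later in this file uses it; the lengths are adjusted in place and the 802.11 header pointer must be re-read because ieee80211_crypto_encap() can insert the crypto header:

if (!ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen, &pktlen, &keyix)) {
        ieee80211_free_mbuf(m0);
        return (EIO);
}
wh = mtod(m0, struct ieee80211_frame *);        /* header may have moved */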
1036
1037/*
1038 * Calculate whether interoperability protection is required for
1039 * this frame.
1040 *
1041 * This requires the rate control information be filled in,
1042 * as the protection requirement depends upon the current
1043 * operating mode / PHY.
1044 */
1045static void
1046ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
1047{
1048 struct ieee80211_frame *wh;
1049 uint8_t rix;
1050 uint16_t flags;
1051 int shortPreamble;
1052 const HAL_RATE_TABLE *rt = sc->sc_currates;
1053 struct ieee80211com *ic = &sc->sc_ic;
1054
1055 flags = bf->bf_state.bfs_txflags;
1056 rix = bf->bf_state.bfs_rc[0].rix;
1057 shortPreamble = bf->bf_state.bfs_shpream;
1058 wh = mtod(bf->bf_m, struct ieee80211_frame *);
1059
1060 /* Disable frame protection for TOA probe frames */
1061 if (bf->bf_flags & ATH_BUF_TOA_PROBE) {
1062 /* XXX count */
1063 flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);
1064 bf->bf_state.bfs_doprot = 0;
1065 goto finish;
1066 }
1067
1068 /*
1069 * If 802.11g protection is enabled, determine whether
1070 * to use RTS/CTS or just CTS. Note that this is only
1071 * done for OFDM unicast frames.
1072 */
1073 if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1074 rt->info[rix].phy == IEEE80211_T_OFDM &&
1075 (flags & HAL_TXDESC_NOACK) == 0) {
1076 bf->bf_state.bfs_doprot = 1;
1077 /* XXX fragments must use CCK rates w/ protection */
1078 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1079 flags |= HAL_TXDESC_RTSENA;
1080 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1081 flags |= HAL_TXDESC_CTSENA;
1082 }
1083 /*
1084 * For frags it would be desirable to use the
1085 * highest CCK rate for RTS/CTS. But stations
1086 * farther away may detect it at a lower CCK rate
1087 * so use the configured protection rate instead
1088 * (for now).
1089 */
1090 sc->sc_stats.ast_tx_protect++;
1091 }
1092
1093 /*
1094 * If 11n protection is enabled and it's a HT frame,
1095 * enable RTS.
1096 *
1097 * XXX ic_htprotmode or ic_curhtprotmode?
1098 * XXX should ic_htprotmode only matter if ic_curhtprotmode
1099 * XXX indicates it's not a HT pure environment?
1100 */
1101 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
1102 rt->info[rix].phy == IEEE80211_T_HT &&
1103 (flags & HAL_TXDESC_NOACK) == 0) {
1104 flags |= HAL_TXDESC_RTSENA;
1105 sc->sc_stats.ast_tx_htprotect++;
1106 }
1107
1108finish:
1109 bf->bf_state.bfs_txflags = flags;
1110}
1111
1112/*
1113 * Update the frame duration given the currently selected rate.
1114 *
1115 * This also updates the frame duration value, so it will require
1116 * a DMA flush.
1117 */
1118static void
1119ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
1120{
1121 struct ieee80211_frame *wh;
1122 uint8_t rix;
1123 uint16_t flags;
1124 int shortPreamble;
1125 struct ath_hal *ah = sc->sc_ah;
1126 const HAL_RATE_TABLE *rt = sc->sc_currates;
1127 int isfrag = bf->bf_m->m_flags & M_FRAG;
1128
1129 flags = bf->bf_state.bfs_txflags;
1130 rix = bf->bf_state.bfs_rc[0].rix;
1131 shortPreamble = bf->bf_state.bfs_shpream;
1132 wh = mtod(bf->bf_m, struct ieee80211_frame *);
1133
1134 /*
1135 * Calculate duration. This logically belongs in the 802.11
1136 * layer but it lacks sufficient information to calculate it.
1137 */
1138 if ((flags & HAL_TXDESC_NOACK) == 0 &&
1139 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
1140 u_int16_t dur;
1141 if (shortPreamble)
1142 dur = rt->info[rix].spAckDuration;
1143 else
1144 dur = rt->info[rix].lpAckDuration;
1145 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
1146 dur += dur; /* additional SIFS+ACK */
1147 /*
1148 * Include the size of next fragment so NAV is
1149 * updated properly. The last fragment uses only
1150 * the ACK duration
1151 *
1152 * XXX TODO: ensure that the rate lookup for each
1153 * fragment is the same as the rate used by the
1154 * first fragment!
1155 */
1156 dur += ath_hal_computetxtime(ah,
1157 rt,
1158 bf->bf_nextfraglen,
1159 rix, shortPreamble,
1160 AH_TRUE);
1161 }
1162 if (isfrag) {
1163 /*
1164 * Force hardware to use computed duration for next
1165 * fragment by disabling multi-rate retry which updates
1166 * duration based on the multi-rate duration table.
1167 */
1168 bf->bf_state.bfs_ismrr = 0;
1169 bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
1170 /* XXX update bfs_rc[0].try? */
1171 }
1172
1173 /* Update the duration field itself */
1174 *(u_int16_t *)wh->i_dur = htole16(dur);
1175 }
1176}
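A worked example of the fragment-duration rule above, using hypothetical airtimes: a non-final fragment charges the NAV with two SIFS+ACK intervals plus the airtime of the next fragment.

/* Hypothetical numbers, microseconds. */
uint16_t dur = 44;      /* lpAckDuration for the chosen rate */
dur += dur;             /* additional SIFS + ACK for the fragment burst */
dur += 200;             /* ath_hal_computetxtime() of the next fragment */
/* dur == 288; this is what lands in wh->i_dur for the fragment */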
1177
1178static uint8_t
1179ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
1180 int cix, int shortPreamble)
1181{
1182 uint8_t ctsrate;
1183
1184 /*
1185 * CTS transmit rate is derived from the transmit rate
1186 * by looking in the h/w rate table. We must also factor
1187 * in whether or not a short preamble is to be used.
1188 */
1189 /* NB: cix is set above where RTS/CTS is enabled */
1190 KASSERT(cix != 0xff, ("cix not setup"));
1191 ctsrate = rt->info[cix].rateCode;
1192
1193 /* XXX this should only matter for legacy rates */
1194 if (shortPreamble)
1195 ctsrate |= rt->info[cix].shortPreamble;
1196
1197 return (ctsrate);
1198}
1199
1200/*
1201 * Calculate the RTS/CTS duration for legacy frames.
1202 */
1203static int
1204ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
1205 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
1206 int flags)
1207{
1208 int ctsduration = 0;
1209
1210 /* This mustn't be called for HT modes */
1211 if (rt->info[cix].phy == IEEE80211_T_HT) {
1212 printf("%s: HT rate where it shouldn't be (0x%x)\n",
1213 __func__, rt->info[cix].rateCode);
1214 return (-1);
1215 }
1216
1217 /*
1218 * Compute the transmit duration based on the frame
1219 * size and the size of an ACK frame. We call into the
1220 * HAL to do the computation since it depends on the
1221 * characteristics of the actual PHY being used.
1222 *
1223 * NB: CTS is assumed the same size as an ACK so we can
1224 * use the precalculated ACK durations.
1225 */
1226 if (shortPreamble) {
1227 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1228 ctsduration += rt->info[cix].spAckDuration;
1229 ctsduration += ath_hal_computetxtime(ah,
1230 rt, pktlen, rix, AH_TRUE, AH_TRUE);
1231 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
1232 ctsduration += rt->info[rix].spAckDuration;
1233 } else {
1234 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1235 ctsduration += rt->info[cix].lpAckDuration;
1236 ctsduration += ath_hal_computetxtime(ah,
1237 rt, pktlen, rix, AH_FALSE, AH_TRUE);
1238 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
1239 ctsduration += rt->info[rix].lpAckDuration;
1240 }
1241
1242 return (ctsduration);
1243}
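Similarly, a hypothetical worked example of the legacy CTS-duration sum above, for an RTS-protected, short-preamble frame that expects an ACK:

/* Hypothetical numbers, microseconds. */
int ctsduration = 0;
ctsduration += 44;      /* spAckDuration: SIFS + CTS (RTS enabled) */
ctsduration += 308;     /* ath_hal_computetxtime() for the data frame */
ctsduration += 44;      /* spAckDuration: SIFS + ACK (ACK expected) */
/* ctsduration == 396 */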
1244
1245/*
1246 * Update the given ath_buf with updated rts/cts setup and duration
1247 * values.
1248 *
1249 * To support rate lookups for each software retry, the rts/cts rate
1250 * and cts duration must be re-calculated.
1251 *
1252 * This function assumes the RTS/CTS flags have been set as needed;
1253 * mrr has been disabled; and the rate control lookup has been done.
1254 *
1255 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
1256 * XXX The 11n NICs support per-rate RTS/CTS configuration.
1257 */
1258static void
1259ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
1260{
1261 uint16_t ctsduration = 0;
1262 uint8_t ctsrate = 0;
1263 uint8_t rix = bf->bf_state.bfs_rc[0].rix;
1264 uint8_t cix = 0;
1265 const HAL_RATE_TABLE *rt = sc->sc_currates;
1266
1267 /*
1268 * No RTS/CTS enabled? Don't bother.
1269 */
1270 if ((bf->bf_state.bfs_txflags &
1271 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
1272 /* XXX is this really needed? */
1273 bf->bf_state.bfs_ctsrate = 0;
1274 bf->bf_state.bfs_ctsduration = 0;
1275 return;
1276 }
1277
1278 /*
1279 * If protection is enabled, use the protection rix control
1280 * rate. Otherwise use the rate0 control rate.
1281 */
1282 if (bf->bf_state.bfs_doprot)
1283 rix = sc->sc_protrix;
1284 else
1285 rix = bf->bf_state.bfs_rc[0].rix;
1286
1287 /*
1288 * If the raw path has hard-coded ctsrate0 to something,
1289 * use it.
1290 */
1291 if (bf->bf_state.bfs_ctsrate0 != 0)
1292 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1293 else
1294 /* Control rate from above */
1295 cix = rt->info[rix].controlRate;
1296
1297 /* Calculate the rtscts rate for the given cix */
1298 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
1299 bf->bf_state.bfs_shpream);
1300
1301 /* The 11n chipsets do ctsduration calculations for you */
1302 if (! ath_tx_is_11n(sc))
1303 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
1304 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
1305 rt, bf->bf_state.bfs_txflags);
1306
1307 /* Squirrel away in ath_buf */
1308 bf->bf_state.bfs_ctsrate = ctsrate;
1309 bf->bf_state.bfs_ctsduration = ctsduration;
1310
1311 /*
1312 * Must disable multi-rate retry when using RTS/CTS.
1313 */
1314 if (!sc->sc_mrrprot) {
1315 bf->bf_state.bfs_ismrr = 0;
1316 bf->bf_state.bfs_try0 =
1317 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
1318 }
1319}
1320
1321/*
1322 * Setup the descriptor chain for a normal or fast-frame
1323 * frame.
1324 *
1325 * XXX TODO: extend to include the destination hardware QCU ID.
1326 * Make sure that is correct. Make sure that when being added
1327 * to the mcastq, the CABQ QCUID is set or things will get a bit
1328 * odd.
1329 */
1330static void
1331ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
1332{
1333 struct ath_desc *ds = bf->bf_desc;
1334 struct ath_hal *ah = sc->sc_ah;
1335
1336 if (bf->bf_state.bfs_txrate0 == 0)
1337 DPRINTF(sc, ATH_DEBUG_XMIT,
1338 "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);
1339
1340 ath_hal_setuptxdesc(ah, ds
1341 , bf->bf_state.bfs_pktlen /* packet length */
1342 , bf->bf_state.bfs_hdrlen /* header length */
1343 , bf->bf_state.bfs_atype /* Atheros packet type */
1344 , bf->bf_state.bfs_txpower /* txpower */
1345 , bf->bf_state.bfs_txrate0
1346 , bf->bf_state.bfs_try0 /* series 0 rate/tries */
1347 , bf->bf_state.bfs_keyix /* key cache index */
1348 , bf->bf_state.bfs_txantenna /* antenna mode */
1349 , bf->bf_state.bfs_txflags /* flags */
1350 , bf->bf_state.bfs_ctsrate /* rts/cts rate */
1351 , bf->bf_state.bfs_ctsduration /* rts/cts duration */
1352 );
1353
1354 /*
1355 * This will be overridden when the descriptor chain is written.
1356 */
1357 bf->bf_lastds = ds;
1358 bf->bf_last = bf;
1359
1360 /* Set rate control and descriptor chain for this frame */
1361 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1362 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
1363}
1364
1365/*
1366 * Do a rate lookup.
1367 *
1368 * This performs a rate lookup for the given ath_buf only if it's required.
1369 * Non-data frames and raw frames don't require it.
1370 *
1371 * This populates the primary and MRR entries; MRR values are
1372 * then disabled later on if something requires it (eg RTS/CTS on
1373 * pre-11n chipsets).
1374 *
1375 * This needs to be done before the RTS/CTS fields are calculated
1376 * as they may depend upon the rate chosen.
1377 */
1378static void
1379ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid,
1380 int pktlen, int is_aggr)
1381{
1382 uint8_t rate, rix;
1383 int try0;
1384 int maxdur; /* XXX unused for now */
1385 int maxpktlen;
1386
1387 if (! bf->bf_state.bfs_doratelookup)
1388 return;
1389
1390 /* Get rid of any previous state */
1391 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1392
1393 ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
1394 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1395 pktlen, tid, is_aggr, &rix, &try0, &rate, &maxdur, &maxpktlen);
1396
1397 /* In case MRR is disabled, make sure rc[0] is setup correctly */
1398 bf->bf_state.bfs_rc[0].rix = rix;
1399 bf->bf_state.bfs_rc[0].ratecode = rate;
1400 bf->bf_state.bfs_rc[0].tries = try0;
1401
1402 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
1403 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1404 is_aggr, bf->bf_state.bfs_rc);
1405 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
1406
1407 sc->sc_txrix = rix; /* for LED blinking */
1408 sc->sc_lastdatarix = rix; /* for fast frames */
1409 bf->bf_state.bfs_try0 = try0;
1410 bf->bf_state.bfs_txrate0 = rate;
1411 bf->bf_state.bfs_rc_maxpktlen = maxpktlen;
1412}
1413
1414/*
1415 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
1416 */
1417static void
1418ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
1419 struct ath_buf *bf)
1420{
1421 struct ath_node *an = ATH_NODE(bf->bf_node);
1422
1424
1425 if (an->clrdmask == 1) {
1427 an->clrdmask = 0;
1428 }
1429}
1430
1431/*
1432 * Return whether this frame should be software queued or
1433 * direct dispatched.
1434 *
1435 * When doing powersave, BAR frames should be queued but other management
1436 * frames should be directly sent.
1437 *
1438 * When not doing powersave, stick BAR frames into the hardware queue
1439 * so it goes out even though the queue is paused.
1440 *
1441 * For now, management frames are also software queued by default.
1442 */
1443static int
1444ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
1445 struct mbuf *m0, int *queue_to_head)
1446{
1447 struct ieee80211_node *ni = &an->an_node;
1448 struct ieee80211_frame *wh;
1449 uint8_t type, subtype;
1450
1451 wh = mtod(m0, struct ieee80211_frame *);
1452 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1453 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1454
1455 (*queue_to_head) = 0;
1456
1457 /* If it's not in powersave - direct-dispatch BAR */
1458 if ((ATH_NODE(ni)->an_is_powersave == 0)
1459 && type == IEEE80211_FC0_TYPE_CTL &&
1460 subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1461 DPRINTF(sc, ATH_DEBUG_SW_TX,
1462 "%s: BAR: TX'ing direct\n", __func__);
1463 return (0);
1464 } else if ((ATH_NODE(ni)->an_is_powersave == 1)
1465 && type == IEEE80211_FC0_TYPE_CTL &&
1466 subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1467 /* BAR TX whilst asleep; queue */
1468 DPRINTF(sc, ATH_DEBUG_SW_TX,
1469 "%s: swq: TX'ing\n", __func__);
1470 (*queue_to_head) = 1;
1471 return (1);
1472 } else if ((ATH_NODE(ni)->an_is_powersave == 1)
1473 && (type == IEEE80211_FC0_TYPE_MGT ||
1474 type == IEEE80211_FC0_TYPE_CTL)) {
1475 /*
1476 * Other control/mgmt frame; bypass software queuing
1477 * for now!
1478 */
1479 DPRINTF(sc, ATH_DEBUG_XMIT,
1480 "%s: %6D: Node is asleep; sending mgmt "
1481 "(type=%d, subtype=%d)\n",
1482 __func__, ni->ni_macaddr, ":", type, subtype);
1483 return (0);
1484 } else {
1485 return (1);
1486 }
1487}
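A rough dispatcher sketch for the predicate above, the way a caller such as ath_tx_start() might use it (hedged; the queueing steps are placeholders):

if (ath_tx_should_swq_frame(sc, an, m0, &queue_to_head)) {
        /* software queue it; queue_to_head pushes it to the TID head */
} else {
        /* direct dispatch to the hardware queue for this AC */
}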
1488
1489/*
1490 * Transmit the given frame to the hardware.
1491 *
1492 * The frame must already be setup; rate control must already have
1493 * been done.
1494 *
1495 * XXX since the TXQ lock is being held here (and I dislike holding
1496 * it for this long when not doing software aggregation), later on
1497 * break this function into "setup_normal" and "xmit_normal". The
1498 * lock only needs to be held for the ath_tx_handoff call.
1499 *
1500 * XXX we don't update the leak count here - if we're doing
1501 * direct frame dispatch, we need to be able to do it without
1502 * decrementing the leak count (eg multicast queue frames.)
1503 */
1504static void
1505ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1506 struct ath_buf *bf)
1507{
1508 struct ath_node *an = ATH_NODE(bf->bf_node);
1509 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
1510
1512
1513 /*
1514 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
1515 * set a completion handler however it doesn't (yet) properly
1516 * handle the strict ordering requirements needed for normal,
1517 * non-aggregate session frames.
1518 *
1519 * Once this is implemented, only set CLRDMASK like this for
1520 * frames that must go out - eg management/raw frames.
1521 */
1523
1524 /* Setup the descriptor before handoff */
1525 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false);
1526 ath_tx_calc_duration(sc, bf);
1527 ath_tx_calc_protection(sc, bf);
1528 ath_tx_set_rtscts(sc, bf);
1530 ath_tx_setds(sc, bf);
1531
1532 /* Track per-TID hardware queue depth correctly */
1533 tid->hwq_depth++;
1534
1535 /* Assign the completion handler */
1537
1538 /* Hand off to hardware */
1539 ath_tx_handoff(sc, txq, bf);
1540}
1541
1542/*
1543 * Do the basic frame setup stuff that's required before the frame
1544 * is added to a software queue.
1545 *
1546 * All frames get mostly the same treatment and it's done once.
1547 * Retransmits fiddle with things like the rate control setup,
1548 * setting the retransmit bit in the packet; doing relevant DMA/bus
1549 * syncing and relinking it (back) into the hardware TX queue.
1550 *
1551 * Note that this may cause the mbuf to be reallocated, so
1552 * m0 may not be valid.
1553 */
1554static int
1555ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
1556 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1557{
1558 struct ieee80211vap *vap = ni->ni_vap;
1559 struct ieee80211com *ic = &sc->sc_ic;
1560 int error, iswep, ismcast, isfrag, ismrr;
1561 int keyix, hdrlen, pktlen, try0 = 0;
1562 u_int8_t rix = 0, txrate = 0;
1563 struct ath_desc *ds;
1564 struct ieee80211_frame *wh;
1565 u_int subtype, flags;
1566 HAL_PKT_TYPE atype;
1567 const HAL_RATE_TABLE *rt;
1568 HAL_BOOL shortPreamble;
1569 struct ath_node *an;
1570
1571 /* XXX TODO: this pri is only used for non-QoS check, right? */
1572 u_int pri;
1573
1574 /*
1575 * To ensure that both sequence numbers and the CCMP PN handling
1576 * is "correct", make sure that the relevant TID queue is locked.
1577 * Otherwise the CCMP PN and seqno may appear out of order, causing
1578 * re-ordered frames to have out of order CCMP PN's, resulting
1579 * in many, many frame drops.
1580 */
1581 ATH_TX_LOCK_ASSERT(sc);
1582
1583 wh = mtod(m0, struct ieee80211_frame *);
1584 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
1585 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1586 isfrag = m0->m_flags & M_FRAG;
1587 hdrlen = ieee80211_anyhdrsize(wh);
1588 /*
1589 * Packet length must not include any
1590 * pad bytes; deduct them here.
1591 */
1592 pktlen = m0->m_pkthdr.len - (hdrlen & 3);
1593
1594 /* Handle encryption twiddling if needed */
1595 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1596 &pktlen, &keyix)) {
1597 ieee80211_free_mbuf(m0);
1598 return EIO;
1599 }
1600
1601 /* packet header may have moved, reset our local pointer */
1602 wh = mtod(m0, struct ieee80211_frame *);
1603
1604 pktlen += IEEE80211_CRC_LEN;
1605
1606 /*
1607 * Load the DMA map so any coalescing is done. This
1608 * also calculates the number of descriptors we need.
1609 */
1610 error = ath_tx_dmasetup(sc, bf, m0);
1611 if (error != 0)
1612 return error;
1613 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
1614 bf->bf_node = ni; /* NB: held reference */
1615 m0 = bf->bf_m; /* NB: may have changed */
1616 wh = mtod(m0, struct ieee80211_frame *);
1617
1618 /* setup descriptors */
1619 ds = bf->bf_desc;
1620 rt = sc->sc_currates;
1621 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1622
1623 /*
1624 * NB: the 802.11 layer marks whether or not we should
1625 * use short preamble based on the current mode and
1626 * negotiated parameters.
1627 */
1628 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1629 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
1630 shortPreamble = AH_TRUE;
1631 sc->sc_stats.ast_tx_shortpre++;
1632 } else {
1633 shortPreamble = AH_FALSE;
1634 }
1635
1636 an = ATH_NODE(ni);
1637 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
1638 flags = 0;
1639 ismrr = 0; /* default no multi-rate retry*/
1640
1641 pri = ath_tx_getac(sc, m0); /* honor classification */
1642 /* XXX use txparams instead of fixed values */
1643 /*
1644 * Calculate Atheros packet type from IEEE80211 packet header,
1645 * setup for rate calculations, and select h/w transmit queue.
1646 */
1647 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1648 case IEEE80211_FC0_TYPE_MGT:
1649 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1650 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
1651 atype = HAL_PKT_TYPE_BEACON;
1652 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
1653 atype = HAL_PKT_TYPE_PROBE_RESP;
1654 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
1655 atype = HAL_PKT_TYPE_ATIM;
1656 else
1657 atype = HAL_PKT_TYPE_NORMAL; /* XXX */
1658 rix = an->an_mgmtrix;
1659 txrate = rt->info[rix].rateCode;
1660 if (shortPreamble)
1661 txrate |= rt->info[rix].shortPreamble;
1662 try0 = ATH_TXMGTTRY;
1663 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
1664 break;
1665 case IEEE80211_FC0_TYPE_CTL:
1666 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */
1667 rix = an->an_mgmtrix;
1668 txrate = rt->info[rix].rateCode;
1669 if (shortPreamble)
1670 txrate |= rt->info[rix].shortPreamble;
1671 try0 = ATH_TXMGTTRY;
1672 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
1673 break;
1674 case IEEE80211_FC0_TYPE_DATA:
1675 atype = HAL_PKT_TYPE_NORMAL; /* default */
1676 /*
1677 * Data frames: multicast frames go out at a fixed rate,
1678 * EAPOL frames use the mgmt frame rate; otherwise consult
1679 * the rate control module for the rate to use.
1680 */
1681 if (ismcast) {
1682 rix = an->an_mcastrix;
1683 txrate = rt->info[rix].rateCode;
1684 if (shortPreamble)
1685 txrate |= rt->info[rix].shortPreamble;
1686 try0 = 1;
1687 } else if (m0->m_flags & M_EAPOL) {
1688 /* XXX? maybe always use long preamble? */
1689 rix = an->an_mgmtrix;
1690 txrate = rt->info[rix].rateCode;
1691 if (shortPreamble)
1692 txrate |= rt->info[rix].shortPreamble;
1693 try0 = ATH_TXMAXTRY; /* XXX?too many? */
1694 } else {
1695 /*
1696 * Do rate lookup on each TX, rather than using
1697 * the hard-coded TX information decided here.
1698 */
1699 ismrr = 1;
1700 bf->bf_state.bfs_doratelookup = 1;
1701 }
1702
1703 /*
1704 * Check whether to set NOACK for this WME category or not.
1705 */
1706 if (ieee80211_wme_vap_ac_is_noack(vap, pri))
1707 flags |= HAL_TXDESC_NOACK;
1708 break;
1709 default:
1710 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
1711 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
1712 /* XXX statistic */
1713 /* XXX free tx dmamap */
1714 ieee80211_free_mbuf(m0);
1715 return EIO;
1716 }
1717
1718 /*
1719 * There are two known scenarios where the frame AC doesn't match
1720 * what the destination TXQ is.
1721 *
1722 * + non-QoS frames (eg management?) that the net80211 stack has
1723 * assigned a higher AC to, but since it's a non-QoS TID, it's
1724 * being thrown into TID 16. TID 16 gets the AC_BE queue.
1725 * It's quite possible that management frames should just be
1726 * direct dispatched to hardware rather than go via the software
1727 * queue; that should be investigated in the future. There are
1728 * some specific scenarios where this doesn't make sense, mostly
1729 * surrounding ADDBA request/response - hence why that is special
1730 * cased.
1731 *
1732 * + Multicast frames going into the VAP mcast queue. That shows up
1733 * as "TXQ 11".
1734 *
1735 * This driver should eventually support separate TID and TXQ locking,
1736 * allowing for arbitrary AC frames to appear on arbitrary software
1737 * queues, being queued to the "correct" hardware queue when needed.
1738 */
1739#if 0
1740 if (txq != sc->sc_ac2q[pri]) {
1741 DPRINTF(sc, ATH_DEBUG_XMIT,
1742 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1743 __func__,
1744 txq,
1745 txq->axq_qnum,
1746 pri,
1747 sc->sc_ac2q[pri],
1748 sc->sc_ac2q[pri]->axq_qnum);
1749 }
1750#endif
1751
1752 /*
1753 * Calculate miscellaneous flags.
1754 */
1755 if (ismcast) {
1756 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
1757 } else if (pktlen > vap->iv_rtsthreshold &&
1758 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1759 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
1760 sc->sc_stats.ast_tx_rts++;
1761 }
1762 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
1763 sc->sc_stats.ast_tx_noack++;
1764#ifdef IEEE80211_SUPPORT_TDMA
1765 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1766 DPRINTF(sc, ATH_DEBUG_TDMA,
1767 "%s: discard frame, ACK required w/ TDMA\n", __func__);
1768 sc->sc_stats.ast_tdma_ack++;
1769 /* XXX free tx dmamap */
1770 ieee80211_free_mbuf(m0);
1771 return EIO;
1772 }
1773#endif
1774
1775 /*
1776 * If it's a frame to do location reporting on,
1777 * communicate it to the HAL.
1778 */
1779 if (ieee80211_get_toa_params(m0, NULL)) {
1780 device_printf(sc->sc_dev,
1781 "%s: setting TX positioning bit\n", __func__);
1782 flags |= HAL_TXDESC_POS;
1783
1784 /*
1785 * Note: The hardware reports timestamps for
1786 * each of the RX'ed packets as part of the packet
1787 * exchange. So this means things like RTS/CTS
1788 * exchanges, as well as the final ACK.
1789 *
1790 * So, if you send a RTS-protected NULL data frame,
1791 * you'll get an RX report for the RTS response, then
1792 * an RX report for the NULL frame, and then the TX
1793 * completion at the end.
1794 *
1795 * NOTE: it doesn't work right for CCK frames;
1796 * there's no channel info data provided unless
1797 * it's OFDM or HT. Will have to dig into it.
1798 */
1801 }
1802
1803#if 0
1804 /*
1805 * Placeholder: if you want to transmit with the azimuth
1806 * timestamp in the end of the payload, here's where you
1807 * should set the TXDESC field.
1808 */
1809 flags |= HAL_TXDESC_HWTS;
1810#endif
1811
1812 /*
1813 * Determine if a tx interrupt should be generated for
1814 * this descriptor. We take a tx interrupt to reap
1815 * descriptors when the h/w hits an EOL condition or
1816 * when the descriptor is specifically marked to generate
1817 * an interrupt. We periodically mark descriptors in this
1818 * way to ensure timely replenishing of the supply needed
1819 * for sending frames. Deferring interrupts reduces system
1820 * load and potentially allows more concurrent work to be
1821 * done but if done too aggressively can cause senders to
1822 * back up.
1823 *
1824 * NB: use >= to deal with sc_txintrperiod changing
1825 * dynamically through sysctl.
1826 */
1827 if (flags & HAL_TXDESC_INTREQ) {
1828 txq->axq_intrcnt = 0;
1829 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1830 flags |= HAL_TXDESC_INTREQ;
1831 txq->axq_intrcnt = 0;
1832 }
1833
1834 /* This point forward is actual TX bits */
1835
1836 /*
1837 * At this point we are committed to sending the frame
1838 * and we don't need to look at m_nextpkt; clear it in
1839 * case this frame is part of frag chain.
1840 */
1841 m0->m_nextpkt = NULL;
1842
1843 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1844 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1845 sc->sc_hwmap[rix].ieeerate, -1);
1846
1847 if (ieee80211_radiotap_active_vap(vap)) {
1848 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1849 if (iswep)
1850 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1851 if (isfrag)
1852 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1853 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1854 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1855 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1856
1857 ieee80211_radiotap_tx(vap, m0);
1858 }
1859
1860 /* Blank the legacy rate array */
1861 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1862
1863 /*
1864 * ath_buf_set_rate needs at least one rate/try to setup
1865 * the rate scenario.
1866 */
1867 bf->bf_state.bfs_rc[0].rix = rix;
1868 bf->bf_state.bfs_rc[0].tries = try0;
1869 bf->bf_state.bfs_rc[0].ratecode = txrate;
1870
1871 /* Store the decided rate index values away */
1872 bf->bf_state.bfs_pktlen = pktlen;
1873 bf->bf_state.bfs_hdrlen = hdrlen;
1874 bf->bf_state.bfs_atype = atype;
1875 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1876 bf->bf_state.bfs_txrate0 = txrate;
1877 bf->bf_state.bfs_try0 = try0;
1878 bf->bf_state.bfs_keyix = keyix;
1879 bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1880 bf->bf_state.bfs_txflags = flags;
1881 bf->bf_state.bfs_shpream = shortPreamble;
1882
1883 /* XXX this should be done in ath_tx_setrate() */
1884 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */
1885 bf->bf_state.bfs_ctsrate = 0; /* calculated later */
1886 bf->bf_state.bfs_ctsduration = 0;
1887 bf->bf_state.bfs_ismrr = ismrr;
1888
1889 return 0;
1890}
1891
1892/*
1893 * Queue a frame to the hardware or software queue.
1894 *
1895 * This can be called by the net80211 code.
1896 *
1897 * XXX what about locking? Or, push the seqno assign into the
1898 * XXX aggregate scheduler so it's serialised?
1899 *
1900 * XXX When sending management frames via ath_raw_xmit(),
1901 * should CLRDMASK be set unconditionally?
1902 */
1903int
1904ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1905 struct ath_buf *bf, struct mbuf *m0)
1906{
1907 struct ieee80211vap *vap = ni->ni_vap;
1908 struct ath_vap *avp = ATH_VAP(vap);
1909 int r = 0;
1910 u_int pri;
1911 int tid;
1912 struct ath_txq *txq;
1913 int ismcast;
1914 const struct ieee80211_frame *wh;
1915 int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1916 ieee80211_seq seqno;
1917 uint8_t type, subtype;
1918 int queue_to_head;
1919
1921
1922 /*
1923 * Determine the target hardware queue.
1924 *
1925 * For multicast frames, the txq gets overridden appropriately
1926 * depending upon the state of PS. If powersave is enabled
1927 * then they get added to the cabq for later transmit.
1928 *
1929 * The "fun" issue here is that group addressed frames should
1930 * have the sequence number from a different pool, rather than
1931 * the per-TID pool. That means that even QoS group addressed
1932 * frames will have a sequence number from that global value,
1933 * which means if we transmit different group addressed frames
1934 * at different traffic priorities, the sequence numbers will
1935 * all be out of whack. So - chances are, the right thing
1936 * to do here is to always put group addressed frames into the BE
1937 * queue, and ignore the TID for queue selection.
1938 *
1939 * For any other frame, we do a TID/QoS lookup inside the frame
1940 * to see what the TID should be. If it's a non-QoS frame, the
1941 * AC and TID are overridden. The TID/TXQ code assumes the
1942 * TID is on a predictable hardware TXQ, so we don't support
1943 * having a node TID queued to multiple hardware TXQs.
1944 * This may change in the future but would require some locking
1945 * fudgery.
1946 */
1947 pri = ath_tx_getac(sc, m0);
1948 tid = ath_tx_gettid(sc, m0);
1949
1950 txq = sc->sc_ac2q[pri];
1951 wh = mtod(m0, struct ieee80211_frame *);
1952 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1953 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1954 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1955
1956 /*
1957 * Enforce how deep the multicast queue can grow.
1958 *
1959 * XXX duplicated in ath_raw_xmit().
1960 */
1961 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1962 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1963 > sc->sc_txq_mcastq_maxdepth) {
1965 m_freem(m0);
1966 return (ENOBUFS);
1967 }
1968 }
1969
1970 /*
1971 * Enforce how deep the unicast queue can grow.
1972 *
1973 * If the node is in power save then we don't want
1974 * the software queue to grow too deep, or a node may
1975 * end up consuming all of the ath_buf entries.
1976 *
1977 * For now, only do this for DATA frames.
1978 *
1979 * We will want to cap how many management/control
1980 * frames get punted to the software queue so it doesn't
1981 * fill up. But the correct solution isn't yet obvious.
1982 * In any case, this check should at least let frames pass
1983 * that we are direct-dispatching.
1984 *
1985 * XXX TODO: duplicate this to the raw xmit path!
1986 */
1987 if (type == IEEE80211_FC0_TYPE_DATA &&
1988 ATH_NODE(ni)->an_is_powersave &&
1989 ATH_NODE(ni)->an_swq_depth >
1992 m_freem(m0);
1993 return (ENOBUFS);
1994 }
1995
1996 /* A-MPDU TX */
1997 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1998 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1999 is_ampdu = is_ampdu_tx | is_ampdu_pending;
2000
2001 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
2002 __func__, tid, pri, is_ampdu);
2003
2004 /* Set local packet state, used to queue packets to hardware */
2005 bf->bf_state.bfs_tid = tid;
2006 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
2007 bf->bf_state.bfs_pri = pri;
2008
2009#if 1
2010 /*
2011 * When servicing one or more stations in power-save mode,
2012 * or if there is some mcast data waiting on the mcast
2013 * queue (to prevent out of order delivery), multicast frames
2014 * must be buffered until after the beacon.
2015 *
2016 * TODO: we should lock the mcastq before we check the length.
2017 */
2018 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
2019 txq = &avp->av_mcastq;
2020 /*
2021 * Mark the frame as eventually belonging on the CAB
2022 * queue, so the descriptor setup functions will
2023 * correctly initialise the descriptor 'qcuId' field.
2024 */
2026 }
2027#endif
2028
2029 /* Do the generic frame setup */
2030 /* XXX should just bzero the bf_state? */
2031 bf->bf_state.bfs_dobaw = 0;
2032
2033 /* A-MPDU TX? Manually set sequence number */
2034 /*
2035 * Don't do it whilst pending; the net80211 layer still
2036 * assigns them.
2037 *
2038 * Don't assign A-MPDU sequence numbers to group address
2039 * frames; they come from a different sequence number space.
2040 */
2041 if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) {
2042 /*
2043 * Always call; this function will
2044 * handle making sure that null data frames
2045 * and group-addressed frames don't get a sequence number
2046 * from the current TID and thus mess with the BAW.
2047 */
2048 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
2049
2050 /*
2051 * Don't add QoS NULL frames and group-addressed frames
2052 * to the BAW.
2053 */
2054 if (IEEE80211_QOS_HAS_SEQ(wh) &&
2055 (! IEEE80211_IS_MULTICAST(wh->i_addr1)) &&
2056 (subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)) {
2057 bf->bf_state.bfs_dobaw = 1;
2058 }
2059 }
2060
2061 /*
2062 * If needed, the sequence number has been assigned.
2063 * Squirrel it away somewhere easy to get to.
2064 */
2065 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
2066
2067 /* Is ampdu pending? fetch the seqno and print it out */
2068 if (is_ampdu_pending)
2069 DPRINTF(sc, ATH_DEBUG_SW_TX,
2070 "%s: tid %d: ampdu pending, seqno %d\n",
2071 __func__, tid, M_SEQNO_GET(m0));
2072
2073 /* This also sets up the DMA map; crypto; frame parameters, etc */
2074 r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2075
2076 if (r != 0)
2077 goto done;
2078
2079 /* At this point m0 could have changed! */
2080 m0 = bf->bf_m;
2081
2082#if 1
2083 /*
2084 * If it's a multicast frame, do a direct-dispatch to the
2085 * destination hardware queue. Don't bother software
2086 * queuing it.
2087 */
2088 /*
2089 * If it's a BAR frame, do a direct dispatch to the
2090 * destination hardware queue. Don't bother software
2091 * queuing it, as the TID will now be paused.
2092 * Sending a BAR frame can occur from the net80211 txa timer
2093 * (ie, retries) or from the ath txtask (completion call.)
2094 * It queues directly to hardware because the TID is paused
2095 * at this point (and won't be unpaused until the BAR has
2096 * either been TXed successfully or max retries has been
2097 * reached.)
2098 */
2099 /*
2100 * Until things are better debugged - if this node is asleep
2101 * and we're sending it a non-BAR frame, direct dispatch it.
2102 * Why? Because we need to figure out what's actually being
2103 * sent - eg, during reassociation/reauthentication after
2104 * the node (last) disappeared whilst asleep, the driver should
2105 * have unpaused/unsleep'ed the node. So until that is
2106 * sorted out, use this workaround.
2107 */
2108 if (txq == &avp->av_mcastq) {
2109 DPRINTF(sc, ATH_DEBUG_SW_TX,
2110 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2112 ath_tx_xmit_normal(sc, txq, bf);
2113 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2114 &queue_to_head)) {
2115 ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2116 } else {
2118 ath_tx_xmit_normal(sc, txq, bf);
2119 }
2120#else
2121 /*
2122 * For now, since there's no software queue,
2123 * direct-dispatch to the hardware.
2124 */
2126 /*
2127 * Update the current leak count if
2128 * we're leaking frames; and set the
2129 * MORE flag as appropriate.
2130 */
2131 ath_tx_leak_count_update(sc, tid, bf);
2132 ath_tx_xmit_normal(sc, txq, bf);
2133#endif
2134done:
2135 return 0;
2136}
2137
2138static int
2139ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2140 struct ath_buf *bf, struct mbuf *m0,
2141 const struct ieee80211_bpf_params *params)
2142{
2143 struct ieee80211com *ic = &sc->sc_ic;
2144 struct ieee80211vap *vap = ni->ni_vap;
2145 int error, ismcast, ismrr;
2146 int keyix, hdrlen, pktlen, try0, txantenna;
2147 u_int8_t rix, txrate;
2148 struct ieee80211_frame *wh;
2149 u_int flags;
2150 HAL_PKT_TYPE atype;
2151 const HAL_RATE_TABLE *rt;
2152 struct ath_desc *ds;
2153 u_int pri;
2154 int o_tid = -1;
2155 int do_override;
2156 uint8_t type, subtype;
2157 int queue_to_head;
2158 struct ath_node *an = ATH_NODE(ni);
2159
2161
2162 wh = mtod(m0, struct ieee80211_frame *);
2163 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2164 hdrlen = ieee80211_anyhdrsize(wh);
2165 /*
2166 * Packet length must not include any
2167 * pad bytes; deduct them here.
2168 */
2169 /* XXX honor IEEE80211_BPF_DATAPAD */
2170 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
2171
2172 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2173 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2174
2175 ATH_KTR(sc, ATH_KTR_TX, 2,
2176 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2177
2178 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2179 __func__, ismcast);
2180
2181 pri = params->ibp_pri & 3;
2182 /* Override pri if the frame isn't a QoS one */
2183 if (! IEEE80211_QOS_HAS_SEQ(wh))
2184 pri = ath_tx_getac(sc, m0);
2185
2186 /* XXX If it's an ADDBA, override the correct queue */
2187 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2188
2189 /* Map ADDBA to the correct priority */
2190 if (do_override) {
2191#if 1
2192 DPRINTF(sc, ATH_DEBUG_XMIT,
2193 "%s: overriding tid %d pri %d -> %d\n",
2194 __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2195#endif
2196 pri = TID_TO_WME_AC(o_tid);
2197 }
2198
2199 /*
2200 * "pri" is the hardware queue to transmit on.
2201 *
2202 * Look at the description in ath_tx_start() to understand
2203 * what needs to be "fixed" here so we just use the TID
2204 * for QoS frames.
2205 */
2206
2207 /* Handle encryption twiddling if needed */
2208 if (! ath_tx_tag_crypto(sc, ni,
2209 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2210 &hdrlen, &pktlen, &keyix)) {
2211 ieee80211_free_mbuf(m0);
2212 return EIO;
2213 }
2214 /* packet header may have moved, reset our local pointer */
2215 wh = mtod(m0, struct ieee80211_frame *);
2216
2217 /* Do the generic frame setup */
2218 /* XXX should just bzero the bf_state? */
2219 bf->bf_state.bfs_dobaw = 0;
2220
2221 error = ath_tx_dmasetup(sc, bf, m0);
2222 if (error != 0)
2223 return error;
2224 m0 = bf->bf_m; /* NB: may have changed */
2225 wh = mtod(m0, struct ieee80211_frame *);
2226 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
2227 bf->bf_node = ni; /* NB: held reference */
2228
2229 /* Always enable CLRDMASK for raw frames for now.. */
2230 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
2231 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
2232 if (params->ibp_flags & IEEE80211_BPF_RTS)
2233 flags |= HAL_TXDESC_RTSENA;
2234 else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2235 /* XXX assume 11g/11n protection? */
2236 bf->bf_state.bfs_doprot = 1;
2237 flags |= HAL_TXDESC_CTSENA;
2238 }
2239 /* XXX leave ismcast to injector? */
2240 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2241 flags |= HAL_TXDESC_NOACK;
2242
2243 rt = sc->sc_currates;
2244 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2245
2246 /* Fetch first rate information */
2247 rix = ath_tx_findrix(sc, params->ibp_rate0);
2248 try0 = params->ibp_try0;
2249
2250 /*
2251 * Override EAPOL rate as appropriate.
2252 */
2253 if (m0->m_flags & M_EAPOL) {
2254 /* XXX? maybe always use long preamble? */
2255 rix = an->an_mgmtrix;
2256 try0 = ATH_TXMAXTRY; /* XXX?too many? */
2257 }
2258
2259 /*
2260 * If it's a frame to do location reporting on,
2261 * communicate it to the HAL.
2262 */
2263 if (ieee80211_get_toa_params(m0, NULL)) {
2264 device_printf(sc->sc_dev,
2265 "%s: setting TX positioning bit\n", __func__);
2266 flags |= HAL_TXDESC_POS;
2269 }
2270
2271 txrate = rt->info[rix].rateCode;
2272 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2273 txrate |= rt->info[rix].shortPreamble;
2274 sc->sc_txrix = rix;
2275 ismrr = (params->ibp_try1 != 0);
2276 txantenna = params->ibp_pri >> 2;
2277 if (txantenna == 0) /* XXX? */
2278 txantenna = sc->sc_txantenna;
2279
2280 /*
2281 * Since ctsrate is fixed, store it away for later
2282 * use when the descriptor fields are being set.
2283 */
2285 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2286
2287 /*
2288 * NB: we mark all packets as type PSPOLL so the h/w won't
2289 * set the sequence number, duration, etc.
2290 */
2291 atype = HAL_PKT_TYPE_PSPOLL;
2292
2293 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2294 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2295 sc->sc_hwmap[rix].ieeerate, -1);
2296
2297 if (ieee80211_radiotap_active_vap(vap)) {
2298 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2299 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2300 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2301 if (m0->m_flags & M_FRAG)
2302 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2303 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2304 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2305 ieee80211_get_node_txpower(ni));
2307
2308 ieee80211_radiotap_tx(vap, m0);
2309 }
2310
2311 /*
2312 * Formulate first tx descriptor with tx controls.
2313 */
2314 ds = bf->bf_desc;
2315 /* XXX check return value? */
2316
2317 /* Store the decided rate index values away */
2318 bf->bf_state.bfs_pktlen = pktlen;
2319 bf->bf_state.bfs_hdrlen = hdrlen;
2320 bf->bf_state.bfs_atype = atype;
2321 bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2322 ieee80211_get_node_txpower(ni));
2323 bf->bf_state.bfs_txrate0 = txrate;
2324 bf->bf_state.bfs_try0 = try0;
2325 bf->bf_state.bfs_keyix = keyix;
2326 bf->bf_state.bfs_txantenna = txantenna;
2327 bf->bf_state.bfs_txflags = flags;
2328 bf->bf_state.bfs_shpream =
2329 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2330
2331 /* Set local packet state, used to queue packets to hardware */
2332 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2333 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2334 bf->bf_state.bfs_pri = pri;
2335
2336 /* XXX this should be done in ath_tx_setrate() */
2337 bf->bf_state.bfs_ctsrate = 0;
2338 bf->bf_state.bfs_ctsduration = 0;
2339 bf->bf_state.bfs_ismrr = ismrr;
2340
2341 /* Blank the legacy rate array */
2342 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2343
2344 bf->bf_state.bfs_rc[0].rix = rix;
2345 bf->bf_state.bfs_rc[0].tries = try0;
2346 bf->bf_state.bfs_rc[0].ratecode = txrate;
2347
2348 if (ismrr) {
2349 int rix;
2350
2351 rix = ath_tx_findrix(sc, params->ibp_rate1);
2352 bf->bf_state.bfs_rc[1].rix = rix;
2353 bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2354
2355 rix = ath_tx_findrix(sc, params->ibp_rate2);
2356 bf->bf_state.bfs_rc[2].rix = rix;
2357 bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2358
2359 rix = ath_tx_findrix(sc, params->ibp_rate3);
2360 bf->bf_state.bfs_rc[3].rix = rix;
2361 bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2362 }
2363 /*
2364 * All the required rate control decisions have been made;
2365 * fill in the rc flags.
2366 */
2368
2369 /* NB: no buffered multicast in power save support */
2370
2371 /*
2372 * If we're overriding the ADDBA destination, dump directly
2373 * into the hardware queue, right after any pending
2374 * frames to that node are.
2375 */
2376 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2377 __func__, do_override);
2378
2379#if 1
2380 /*
2381 * Put addba frames in the right place in the right TID/HWQ.
2382 */
2383 if (do_override) {
2385 /*
2386 * XXX if it's addba frames, should we be leaking
2387 * them out via the frame leak method?
2388 * XXX for now let's not risk it; but we may wish
2389 * to investigate this later.
2390 */
2391 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2392 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2393 &queue_to_head)) {
2394 /* Queue to software queue */
2395 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2396 } else {
2398 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2399 }
2400#else
2401 /* Direct-dispatch to the hardware */
2403 /*
2404 * Update the current leak count if
2405 * we're leaking frames; and set the
2406 * MORE flag as appropriate.
2407 */
2408 ath_tx_leak_count_update(sc, tid, bf);
2409 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2410#endif
2411 return 0;
2412}
2413
2414/*
2415 * Send a raw frame.
2416 *
2417 * This can be called by net80211.
2418 */
2419int
2420ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2421 const struct ieee80211_bpf_params *params)
2422{
2423 struct ieee80211com *ic = ni->ni_ic;
2424 struct ath_softc *sc = ic->ic_softc;
2425 struct ath_buf *bf;
2426 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2427 int error = 0;
2428
2429 ATH_PCU_LOCK(sc);
2430 if (sc->sc_inreset_cnt > 0) {
2431 DPRINTF(sc, ATH_DEBUG_XMIT,
2432 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2433 error = EIO;
2434 ATH_PCU_UNLOCK(sc);
2435 goto badbad;
2436 }
2437 sc->sc_txstart_cnt++;
2438 ATH_PCU_UNLOCK(sc);
2439
2440 /* Wake the hardware up already */
2441 ATH_LOCK(sc);
2443 ATH_UNLOCK(sc);
2444
2445 ATH_TX_LOCK(sc);
2446
2447 if (!sc->sc_running || sc->sc_invalid) {
2448 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",
2449 __func__, sc->sc_running, sc->sc_invalid);
2450 m_freem(m);
2451 error = ENETDOWN;
2452 goto bad;
2453 }
2454
2455 /*
2456 * Enforce how deep the multicast queue can grow.
2457 *
2458 * XXX duplicated in ath_tx_start().
2459 */
2460 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2461 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2462 > sc->sc_txq_mcastq_maxdepth) {
2464 error = ENOBUFS;
2465 }
2466
2467 if (error != 0) {
2468 m_freem(m);
2469 goto bad;
2470 }
2471 }
2472
2473 /*
2474 * Grab a TX buffer and associated resources.
2475 */
2476 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2477 if (bf == NULL) {
2478 sc->sc_stats.ast_tx_nobuf++;
2479 m_freem(m);
2480 error = ENOBUFS;
2481 goto bad;
2482 }
2483 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2484 m, params, bf);
2485
2486 if (params == NULL) {
2487 /*
2488 * Legacy path; interpret frame contents to decide
2489 * precisely how to send the frame.
2490 */
2491 if (ath_tx_start(sc, ni, bf, m)) {
2492 error = EIO; /* XXX */
2493 goto bad2;
2494 }
2495 } else {
2496 /*
2497 * Caller supplied explicit parameters to use in
2498 * sending the frame.
2499 */
2500 if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2501 error = EIO; /* XXX */
2502 goto bad2;
2503 }
2504 }
2505 sc->sc_wd_timer = 5;
2506 sc->sc_stats.ast_tx_raw++;
2507
2508 /*
2509 * Update the TIM - if there's anything queued to the
2510 * software queue and power save is enabled, we should
2511 * set the TIM.
2512 */
2513 ath_tx_update_tim(sc, ni, 1);
2514
2515 ATH_TX_UNLOCK(sc);
2516
2517 ATH_PCU_LOCK(sc);
2518 sc->sc_txstart_cnt--;
2519 ATH_PCU_UNLOCK(sc);
2520
2521 /* Put the hardware back to sleep if required */
2522 ATH_LOCK(sc);
2524 ATH_UNLOCK(sc);
2525
2526 return 0;
2527
2528bad2:
2529 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2530 "bf=%p",
2531 m,
2532 params,
2533 bf);
2534 ATH_TXBUF_LOCK(sc);
2535 ath_returnbuf_head(sc, bf);
2536 ATH_TXBUF_UNLOCK(sc);
2537
2538bad:
2539 ATH_TX_UNLOCK(sc);
2540
2541 ATH_PCU_LOCK(sc);
2542 sc->sc_txstart_cnt--;
2543 ATH_PCU_UNLOCK(sc);
2544
2545 /* Put the hardware back to sleep if required */
2546 ATH_LOCK(sc);
2548 ATH_UNLOCK(sc);
2549
2550badbad:
2551 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2552 m, params);
2554
2555 return error;
2556}
2557
2558/* Some helper functions */
2559
2560/*
2561 * ADDBA (and potentially others) need to be placed in the same
2562 * hardware queue as the TID/node it's relating to. This is so
2563 * it goes out after any pending non-aggregate frames to the
2564 * same node/TID.
2565 *
2566 * If this isn't done, the ADDBA can go out before the frames
2567 * already queued in hardware. Even though these frames have
2568 * sequence numbers -earlier- than the ADDBA (and no frames whose
2569 * sequence numbers are after the ADDBA should have been queued!)
2570 * they'll arrive after the ADDBA - and the receiving end
2571 * will simply drop them as being out of the BAW.
2572 *
2573 * The frames can't be appended to the TID software queue - they'd
2574 * never be sent out. So these frames have to be directly
2575 * dispatched to the hardware, rather than queued in software.
2576 * So if this function returns true, the TXQ has to be
2577 * overridden and the frame has to be directly dispatched.
2578 *
2579 * It's a dirty hack, but someone's gotta do it.
2580 */
2581
2582/*
2583 * XXX doesn't belong here!
2584 */
2585static int
2586ieee80211_is_action(struct ieee80211_frame *wh)
2587{
2588 /* Type: Management frame? */
2589 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2590 IEEE80211_FC0_TYPE_MGT)
2591 return 0;
2592
2593 /* Subtype: Action frame? */
2594 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2595 IEEE80211_FC0_SUBTYPE_ACTION)
2596 return 0;
2597
2598 return 1;
2599}
2600
2601/*
2602 * Return an alternate TID for ADDBA request frames.
2603 *
2604 * Yes, this likely should be done in the net80211 layer.
2605 */
2606static int
2607ath_tx_action_frame_override_queue(struct ath_softc *sc,
2608 struct ieee80211_node *ni,
2609 struct mbuf *m0, int *tid)
2610{
2611 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2612 struct ieee80211_action_ba_addbarequest *ia;
2613 uint8_t *frm;
2614 uint16_t baparamset;
2615
2616 /* Not action frame? Bail */
2617 if (! ieee80211_is_action(wh))
2618 return 0;
2619
2620 /* XXX Not needed for frames we send? */
2621#if 0
2622 /* Correct length? */
2623 if (! ieee80211_parse_action(ni, m))
2624 return 0;
2625#endif
2626
2627 /* Extract out action frame */
2628 frm = (u_int8_t *)&wh[1];
2629 ia = (struct ieee80211_action_ba_addbarequest *) frm;
2630
2631 /* Not ADDBA? Bail */
2632 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2633 return 0;
2634 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2635 return 0;
2636
2637 /* Extract TID, return it */
2638 baparamset = le16toh(ia->rq_baparamset);
2639 *tid = (int) _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID);
2640
2641 return 1;
2642}
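The TID lives in bits 2..5 of the ADDBA request's Block Ack Parameter Set. The following standalone sketch (not driver code; the mask and shift mirror the conventional IEEE80211_BAPS_TID definitions, and the example field value is made up) shows the extraction that _IEEE80211_MASKSHIFT() performs above:

#include <stdio.h>
#include <stdint.h>

#define BAPS_TID_MASK	0x003c	/* bits 2..5 of the BA parameter set */
#define BAPS_TID_SHIFT	2

int
main(void)
{
	uint16_t baparamset = 0x1016;	/* example: buffer size 64, TID 5, immediate BA */
	int tid = (baparamset & BAPS_TID_MASK) >> BAPS_TID_SHIFT;

	printf("tid=%d\n", tid);	/* prints tid=5 */
	return (0);
}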
2643
2644/* Per-node software queue operations */
2645
2646/*
2647 * Add the current packet to the given BAW.
2648 * It is assumed that the current packet
2649 *
2650 * + fits inside the BAW;
2651 * + already has had a sequence number allocated.
2652 *
2653 * Since the BAW status may be modified by both the ath task and
2654 * the net80211/ifnet contexts, the TID must be locked.
2655 */
2656void
2657ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2658 struct ath_tid *tid, struct ath_buf *bf)
2659{
2660 int index, cindex;
2661 struct ieee80211_tx_ampdu *tap;
2662
2664
2665 if (bf->bf_state.bfs_isretried)
2666 return;
2667
2668 tap = ath_tx_get_tx_tid(an, tid->tid);
2669
2670 if (! bf->bf_state.bfs_dobaw) {
2671 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2672 "%s: dobaw=0, seqno=%d, window %d:%d\n",
2673 __func__, SEQNO(bf->bf_state.bfs_seqno),
2674 tap->txa_start, tap->txa_wnd);
2675 }
2676
2677 if (bf->bf_state.bfs_addedbaw)
2678 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2679 "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2680 "baw head=%d tail=%d\n",
2681 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2682 tap->txa_start, tap->txa_wnd, tid->baw_head,
2683 tid->baw_tail);
2684
2685 /*
2686 * Verify that the given sequence number is not outside of the
2687 * BAW. Complain loudly if that's the case.
2688 */
2689 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2690 SEQNO(bf->bf_state.bfs_seqno))) {
2691 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2692 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2693 "baw head=%d tail=%d\n",
2694 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2695 tap->txa_start, tap->txa_wnd, tid->baw_head,
2696 tid->baw_tail);
2697 }
2698
2699 /*
2700 * ni->ni_txseqs[] is the currently allocated seqno.
2701 * the txa state contains the current baw start.
2702 */
2703 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2704 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2705 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2706 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2707 "baw head=%d tail=%d\n",
2708 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2709 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2710 tid->baw_tail);
2711
2712#if 0
2713 assert(tid->tx_buf[cindex] == NULL);
2714#endif
2715 if (tid->tx_buf[cindex] != NULL) {
2716 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2717 "%s: ba packet dup (index=%d, cindex=%d, "
2718 "head=%d, tail=%d)\n",
2719 __func__, index, cindex, tid->baw_head, tid->baw_tail);
2720 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2721 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2722 __func__,
2723 tid->tx_buf[cindex],
2724 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2725 bf,
2726 SEQNO(bf->bf_state.bfs_seqno)
2727 );
2728 }
2729 tid->tx_buf[cindex] = bf;
2730
2731 if (index >= ((tid->baw_tail - tid->baw_head) &
2732 (ATH_TID_MAX_BUFS - 1))) {
2733 tid->baw_tail = cindex;
2735 }
2736}
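To make the index arithmetic concrete, here is a standalone sketch (not driver code). It assumes the conventional ATH_BA_INDEX()-style definition over a 4096-entry sequence space and a power-of-two ring of tx_buf[] slots; the 64-slot ring size and the example numbers are made up:

#include <stdio.h>

#define SEQ_RANGE	4096
#define RING_SIZE	64			/* stand-in for ATH_TID_MAX_BUFS */
#define BA_INDEX(st, seq)	(((seq) - (st)) & (SEQ_RANGE - 1))

int
main(void)
{
	int txa_start = 4090;	/* BAW left edge */
	int baw_head = 10;	/* ring slot of the left edge */
	int seqno = 3;		/* new frame; note the sequence space wrap */
	int index, cindex;

	index = BA_INDEX(txa_start, seqno);	/* 9 frames past the left edge */
	cindex = (baw_head + index) & (RING_SIZE - 1);
	printf("index=%d cindex=%d\n", index, cindex);	/* index=9 cindex=19 */
	return (0);
}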
2737
2738/*
2739 * Flip the BAW buffer entry over from the existing one to the new one.
2740 *
2741 * When software retransmitting a (sub-)frame, it is entirely possible that
2742 * the frame ath_buf is marked as BUSY and can't be immediately reused.
2743 * In that instance the buffer is cloned and the new buffer is used for
2744 * retransmit. We thus need to update the ath_buf slot in the BAW buf
2745 * tracking array to maintain consistency.
2746 */
2747static void
2749 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2750{
2751 int index, cindex;
2752 struct ieee80211_tx_ampdu *tap;
2753 int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2754
2756
2757 tap = ath_tx_get_tx_tid(an, tid->tid);
2758 index = ATH_BA_INDEX(tap->txa_start, seqno);
2759 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2760
2761 /*
2762 * Just warn for now; if it happens then we should find out
2763 * about it. It's highly likely the aggregation session will
2764 * soon hang.
2765 */
2766 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2767 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2768 "%s: retransmitted buffer"
2769 " has mismatching seqno's, BA session may hang.\n",
2770 __func__);
2771 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2772 "%s: old seqno=%d, new_seqno=%d\n", __func__,
2773 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2774 }
2775
2776 if (tid->tx_buf[cindex] != old_bf) {
2777 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2778 "%s: ath_buf pointer incorrect; "
2779 " has m BA session may hang.\n", __func__);
2780 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2781 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2782 }
2783
2784 tid->tx_buf[cindex] = new_bf;
2785}
2786
2787/*
2788 * seq_start - left edge of BAW
2789 * seq_next - current/next sequence number to allocate
2790 *
2791 * Since the BAW status may be modified by both the ath task and
2792 * the net80211/ifnet contexts, the TID must be locked.
2793 */
2794static void
2795ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2796 struct ath_tid *tid, const struct ath_buf *bf)
2797{
2798 int index, cindex;
2799 struct ieee80211_tx_ampdu *tap;
2800 int seqno = SEQNO(bf->bf_state.bfs_seqno);
2801
2803
2804 tap = ath_tx_get_tx_tid(an, tid->tid);
2805 index = ATH_BA_INDEX(tap->txa_start, seqno);
2806 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2807
2808 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2809 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2810 "baw head=%d, tail=%d\n",
2811 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2812 cindex, tid->baw_head, tid->baw_tail);
2813
2814 /*
2815 * If this occurs then we have a big problem - something else
2816 * has slid tap->txa_start along without updating the BAW
2817 * tracking start/end pointers. Thus the TX BAW state is now
2818 * completely busted.
2819 *
2820 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2821 * it's quite possible that a cloned buffer is making its way
2822 * here and causing it to fire off. Disable TDMA for now.
2823 */
2824 if (tid->tx_buf[cindex] != bf) {
2825 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2826 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2827 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2828 tid->tx_buf[cindex],
2829 (tid->tx_buf[cindex] != NULL) ?
2830 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2831 }
2832
2833 tid->tx_buf[cindex] = NULL;
2834
2835 while (tid->baw_head != tid->baw_tail &&
2836 !tid->tx_buf[tid->baw_head]) {
2837 INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2838 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2839 }
2840 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2841 "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2842 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2843}
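The loop at the end of this function is what slides the BAW left edge: once the slot at baw_head is empty, the ring head and txa_start advance together until the next still-outstanding frame. A standalone sketch of the same walk (not driver code; the 8-slot ring and the starting sequence number are made up):

#include <stdio.h>

#define SEQ_RANGE	4096
#define RING_SIZE	8

int
main(void)
{
	/* 1 = frame still outstanding, 0 = completed/empty slot */
	int ring[RING_SIZE] = { 0, 0, 1, 0, 0, 0, 0, 0 };
	int baw_head = 0, baw_tail = 3;
	int txa_start = 100;

	while (baw_head != baw_tail && !ring[baw_head]) {
		txa_start = (txa_start + 1) % SEQ_RANGE;
		baw_head = (baw_head + 1) % RING_SIZE;
	}
	printf("txa_start=%d baw_head=%d\n", txa_start, baw_head);
	/* prints txa_start=102 baw_head=2 */
	return (0);
}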
2844
2845static void
2846ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2847 struct ath_buf *bf)
2848{
2849 struct ieee80211_frame *wh;
2850
2852
2853 if (tid->an->an_leak_count > 0) {
2854 wh = mtod(bf->bf_m, struct ieee80211_frame *);
2855
2856 /*
2857 * Update MORE based on the software/net80211 queue states.
2858 */
2859 if ((tid->an->an_stack_psq > 0)
2860 || (tid->an->an_swq_depth > 0))
2861 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2862 else
2863 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2864
2865 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2866 "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2867 __func__,
2868 tid->an->an_node.ni_macaddr,
2869 ":",
2870 tid->an->an_leak_count,
2871 tid->an->an_stack_psq,
2872 tid->an->an_swq_depth,
2873 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2874
2875 /*
2876 * Re-sync the underlying buffer.
2877 */
2878 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2879 BUS_DMASYNC_PREWRITE);
2880
2881 tid->an->an_leak_count --;
2882 }
2883}
2884
2885static int
2886ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2887{
2888
2890
2891 if (tid->an->an_leak_count > 0) {
2892 return (1);
2893 }
2894 if (tid->paused)
2895 return (0);
2896 return (1);
2897}
2898
2899/*
2900 * Mark the current node/TID as ready to TX.
2901 *
2902 * This is done to make it easy for the software scheduler to
2903 * find which nodes have data to send.
2904 *
2905 * The TXQ lock must be held.
2906 */
2907void
2908ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2909{
2910 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2911
2913
2914 /*
2915 * If we are leaking out a frame to this destination
2916 * for PS-POLL, ensure that we allow scheduling to
2917 * occur.
2918 */
2919 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2920 return; /* paused, can't schedule yet */
2921
2922 if (tid->sched)
2923 return; /* already scheduled */
2924
2925 tid->sched = 1;
2926
2927#if 0
2928 /*
2929 * If this is a sleeping node we're leaking to, give
2930 * it a higher priority. This is so bad for QoS it hurts.
2931 */
2932 if (tid->an->an_leak_count) {
2933 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2934 } else {
2935 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2936 }
2937#endif
2938
2939 /*
2940 * We can't do the above - it'll confuse the TXQ software
2941 * scheduler which will keep checking the _head_ TID
2942 * in the list to see if it has traffic. If we queue
2943 * a TID to the head of the list and it doesn't transmit,
2944 * we'll check it again.
2945 *
2946 * So, get the rest of this leaking frames support working
2947 * and reliable first and _then_ optimise it so they're
2948 * pushed out in front of any other pending software
2949 * queued nodes.
2950 */
2951 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2952}
2953
2954/*
2955 * Mark the current node as no longer needing to be polled for
2956 * TX packets.
2957 *
2958 * The TXQ lock must be held.
2959 */
2960static void
2961ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2962{
2963 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2964
2966
2967 if (tid->sched == 0)
2968 return;
2969
2970 tid->sched = 0;
2971 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2972}
2973
2974/*
2975 * Assign a sequence number manually to the given frame.
2976 *
2977 * This should only be called for A-MPDU TX frames.
2978 *
2979 * Note: for group addressed frames, the sequence number
2980 * should be from NONQOS_TID, and net80211 should have
2981 * already assigned it for us.
2982 */
2983static ieee80211_seq
2984ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2985 struct ath_buf *bf, struct mbuf *m0)
2986{
2987 struct ieee80211_frame *wh;
2988 int tid;
2989 ieee80211_seq seqno;
2990 uint8_t subtype;
2991
2992 wh = mtod(m0, struct ieee80211_frame *);
2993 tid = ieee80211_gettid(wh);
2994
2995 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n",
2996 __func__, tid, IEEE80211_QOS_HAS_SEQ(wh));
2997
2998 /* XXX Is it a control frame? Ignore */
2999
3000 /* Does the packet require a sequence number? */
3001 if (! IEEE80211_QOS_HAS_SEQ(wh))
3002 return -1;
3003
3005
3006 /*
3007 * Is it a QOS NULL Data frame? Give it a sequence number from
3008 * the default TID (IEEE80211_NONQOS_TID.)
3009 *
3010 * The RX path of everything I've looked at doesn't include the NULL
3011 * data frame sequence number in the aggregation state updates, so
3012 * assigning it a sequence number there will cause a BAW hole on the
3013 * RX side.
3014 */
3015 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3016 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
3017 /* XXX no locking for this TID? This is a bit of a problem. */
3018 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
3019 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
3020 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3021 /*
3022 * group addressed frames get a sequence number from
3023 * a different sequence number space.
3024 */
3025 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
3026 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
3027 } else {
3028 /* Manually assign sequence number */
3029 seqno = ni->ni_txseqs[tid];
3030 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
3031 }
3032 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
3033 M_SEQNO_SET(m0, seqno);
3034
3035 /* Return so caller can do something with it if needed */
3036 DPRINTF(sc, ATH_DEBUG_SW_TX,
3037 "%s: -> subtype=0x%x, tid=%d, seqno=%d\n",
3038 __func__, subtype, tid, seqno);
3039 return seqno;
3040}
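A standalone sketch (not driver code) of the three cases above: QoS data frames draw from the per-TID counter, while QoS NULL and group addressed frames draw from the shared non-QoS counter; the modulo mirrors INCR(..., IEEE80211_SEQ_RANGE):

#include <stdio.h>

#define SEQ_RANGE	4096
#define NONQOS_TID	16

static int txseqs[NONQOS_TID + 1];	/* models ni->ni_txseqs[] */

static int
assign_seqno(int tid, int is_qos_null, int is_mcast)
{
	int idx, seqno;

	idx = (is_qos_null || is_mcast) ? NONQOS_TID : tid;
	seqno = txseqs[idx];
	txseqs[idx] = (txseqs[idx] + 1) % SEQ_RANGE;	/* INCR() */
	return (seqno);
}

int
main(void)
{
	printf("%d\n", assign_seqno(0, 0, 0));	/* TID 0 data -> 0 */
	printf("%d\n", assign_seqno(0, 0, 0));	/* TID 0 data -> 1 */
	printf("%d\n", assign_seqno(0, 1, 0));	/* QoS NULL -> 0 (non-QoS space) */
	printf("%d\n", assign_seqno(5, 0, 1));	/* multicast -> 1 (non-QoS space) */
	return (0);
}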
3041
3042/*
3043 * Attempt to direct dispatch an aggregate frame to hardware.
3044 * If the frame is out of BAW, queue.
3045 * Otherwise, schedule it as a single frame.
3046 */
3047static void
3048ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
3049 struct ath_txq *txq, struct ath_buf *bf)
3050{
3051 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
3052 struct ieee80211_tx_ampdu *tap;
3053
3055
3056 tap = ath_tx_get_tx_tid(an, tid->tid);
3057
3058 /* paused? queue */
3059 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
3060 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3061 /* XXX don't sched - we're paused! */
3062 return;
3063 }
3064
3065 /* outside baw? queue */
3066 if (bf->bf_state.bfs_dobaw &&
3067 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
3068 SEQNO(bf->bf_state.bfs_seqno)))) {
3069 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3070 ath_tx_tid_sched(sc, tid);
3071 return;
3072 }
3073
3074 /*
3075 * This is a temporary check and should be removed once
3076 * all the relevant code paths have been fixed.
3077 *
3078 * During aggregate retries, it's possible that the head
3079 * frame (which has the bfs_aggr and bfs_nframes fields set
3080 * for said aggregate) will fail and will be retried as
3081 * a single frame. In this instance, the values should
3082 * be reset or the completion code will get upset with you.
3083 */
3084 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
3085 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3086 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
3088 bf->bf_state.bfs_aggr = 0;
3089 bf->bf_state.bfs_nframes = 1;
3090 }
3091
3092 /* Update CLRDMASK just before this frame is queued */
3093 ath_tx_update_clrdmask(sc, tid, bf);
3094
3095 /* Direct dispatch to hardware */
3096 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen,
3097 false);
3098 ath_tx_calc_duration(sc, bf);
3099 ath_tx_calc_protection(sc, bf);
3100 ath_tx_set_rtscts(sc, bf);
3102 ath_tx_setds(sc, bf);
3103
3104 /* Statistics */
3106
3107 /* Track per-TID hardware queue depth correctly */
3108 tid->hwq_depth++;
3109
3110 /* Add to BAW */
3111 if (bf->bf_state.bfs_dobaw) {
3112 ath_tx_addto_baw(sc, an, tid, bf);
3113 bf->bf_state.bfs_addedbaw = 1;
3114 }
3115
3116 /* Set completion handler, multi-frame aggregate or not */
3118
3119 /*
3120 * Update the current leak count if
3121 * we're leaking frames; and set the
3122 * MORE flag as appropriate.
3123 */
3124 ath_tx_leak_count_update(sc, tid, bf);
3125
3126 /* Hand off to hardware */
3127 ath_tx_handoff(sc, txq, bf);
3128}
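The "outside baw? queue" test above hinges on BAW_WITHIN(). Assuming its conventional definition (distance from the left edge, modulo the 4096 sequence space, compared against the window size), here is a standalone sketch (not driver code; the edge, window, and sequence numbers are made up) including a window that wraps the sequence space:

#include <stdio.h>

#define SEQ_RANGE	4096
#define BAW_WITHIN(st, wnd, seq) \
	((((seq) - (st)) & (SEQ_RANGE - 1)) < (wnd))

int
main(void)
{
	int txa_start = 4080, txa_wnd = 64;

	printf("%d\n", BAW_WITHIN(txa_start, txa_wnd, 4090));	/* 1: inside */
	printf("%d\n", BAW_WITHIN(txa_start, txa_wnd, 30));	/* 1: inside (wrapped) */
	printf("%d\n", BAW_WITHIN(txa_start, txa_wnd, 200));	/* 0: outside -> software queue */
	return (0);
}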
3129
3130/*
3131 * Attempt to send the packet.
3132 * If the queue isn't busy, direct-dispatch.
3133 * If the queue is busy enough, queue the given packet on the
3134 * relevant software queue.
3135 */
3136void
3137ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3138 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3139{
3140 struct ath_node *an = ATH_NODE(ni);
3141 struct ieee80211_frame *wh;
3142 struct ath_tid *atid;
3143 int pri, tid;
3144 struct mbuf *m0 = bf->bf_m;
3145
3147
3148 /* Fetch the TID - non-QoS frames get assigned to TID 16 */
3149 wh = mtod(m0, struct ieee80211_frame *);
3150 pri = ath_tx_getac(sc, m0);
3151 tid = ath_tx_gettid(sc, m0);
3152 atid = &an->an_tid[tid];
3153
3154 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3155 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3156
3157 /* Set local packet state, used to queue packets to hardware */
3158 /* XXX potentially duplicate info, re-check */
3159 bf->bf_state.bfs_tid = tid;
3160 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3161 bf->bf_state.bfs_pri = pri;
3162
3163 /*
3164 * If the hardware queue isn't busy, direct-dispatch it.
3165 * If the hardware queue is busy, queue it to the software queue.
3166 * If the TID is paused or the traffic is outside the BAW, software
3167 * queue it.
3168 *
3169 * If the node is in power-save and we're leaking a frame,
3170 * leak a single frame.
3171 */
3172 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3173 /* TID is paused, queue */
3174 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3175 /*
3176 * If the caller requested that it be sent at a high
3177 * priority, queue it at the head of the list.
3178 */
3179 if (queue_to_head)
3180 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3181 else
3182 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3183 } else if (ath_tx_ampdu_pending(sc, an, tid)) {
3184 /* AMPDU pending; queue */
3185 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3186 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3187 /* XXX sched? */
3188 } else if (ath_tx_ampdu_running(sc, an, tid)) {
3189 /*
3190 * AMPDU running, queue single-frame if the hardware queue
3191 * isn't busy.
3192 *
3193 * If the hardware queue is busy sending an aggregate frame,
3194 * then just hold off so we can queue more aggregate frames.
3195 *
3196 * Otherwise we may end up with single frames leaking through
3197 * because we are dispatching them too quickly.
3198 *
3199 * TODO: maybe we should treat this as two policies - minimise
3200 * latency, or maximise throughput. Then for BE/BK we can
3201 * maximise throughput, and VO/VI (if AMPDU is enabled!)
3202 * minimise latency.
3203 */
3204
3205 /*
3206 * Always queue the frame to the tail of the list.
3207 */
3208 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3209
3210 /*
3211 * If the hardware queue isn't busy, direct dispatch
3212 * the head frame in the list.
3213 *
3214 * Note: if we're, say, configured to do ADDBA but not A-MPDU
3215 * then maybe we want to still queue two non-aggregate frames
3216 * to the hardware. (Again with the per-TID policy
3217 * configuration.)
3218 *
3219 * Otherwise, schedule the TID.
3220 */
3221 /* XXX TXQ locking */
3222 if (txq->axq_depth + txq->fifo.axq_depth == 0) {
3223 bf = ATH_TID_FIRST(atid);
3224 ATH_TID_REMOVE(atid, bf, bf_list);
3225
3226 /*
3227 * Ensure it's definitely treated as a non-AMPDU
3228 * frame - this information may have been left
3229 * over from a previous attempt.
3230 */
3231 bf->bf_state.bfs_aggr = 0;
3232 bf->bf_state.bfs_nframes = 1;
3233
3234 /* Queue to the hardware */
3235 ath_tx_xmit_aggr(sc, an, txq, bf);
3236 DPRINTF(sc, ATH_DEBUG_SW_TX,
3237 "%s: xmit_aggr\n",
3238 __func__);
3239 } else {
3240 DPRINTF(sc, ATH_DEBUG_SW_TX,
3241 "%s: ampdu; swq'ing\n",
3242 __func__);
3243
3244 ath_tx_tid_sched(sc, atid);
3245 }
3246 /*
3247 * If we're not doing A-MPDU, be prepared to direct dispatch
3248 * up to both limits if possible. This particular corner
3249 * case may end up with packet starvation between aggregate
3250 * traffic and non-aggregate traffic: we want to ensure
3251 * that non-aggregate stations get a few frames queued to the
3252 * hardware before the aggregate station(s) get their chance.
3253 *
3254 * So if you only ever see a couple of frames direct dispatched
3255 * to the hardware from a non-AMPDU client, check both here
3256 * and in the software queue dispatcher to ensure that those
3257 * non-AMPDU stations get a fair chance to transmit.
3258 */
3259 /* XXX TXQ locking */
3260 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3261 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3262 /* AMPDU not running, attempt direct dispatch */
3263 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3264 /* See if clrdmask needs to be set */
3265 ath_tx_update_clrdmask(sc, atid, bf);
3266
3267 /*
3268 * Update the current leak count if
3269 * we're leaking frames; and set the
3270 * MORE flag as appropriate.
3271 */
3272 ath_tx_leak_count_update(sc, atid, bf);
3273
3274 /*
3275 * Dispatch the frame.
3276 */
3277 ath_tx_xmit_normal(sc, txq, bf);
3278 } else {
3279 /* Busy; queue */
3280 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3281 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3282 ath_tx_tid_sched(sc, atid);
3283 }
3284}
3285
3286/*
3287 * Only set the clrdmask bit if none of the TIDs are currently
3288 * filtered.
3289 *
3290 * XXX TODO: go through all the callers and check to see
3291 * which are being called in the context of looping over all
3292 * TIDs (eg, if all tids are being paused, resumed, etc.)
3293 * That'll avoid O(n^2) complexity here.
3294 */
3295static void
3296ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3297{
3298 int i;
3299
3301
3302 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3303 if (an->an_tid[i].isfiltered == 1)
3304 return;
3305 }
3306 an->clrdmask = 1;
3307}
3308
3309/*
3310 * Configure the per-TID node state.
3311 *
3312 * This likely belongs in if_ath_node.c but I can't think of anywhere
3313 * else to put it just yet.
3314 *
3315 * This sets up the SLISTs and the mutex as appropriate.
3316 */
3317void
3318ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3319{
3320 int i, j;
3321 struct ath_tid *atid;
3322
3323 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3324 atid = &an->an_tid[i];
3325
3326 /* XXX now with this bzero(), is the field 0'ing needed? */
3327 bzero(atid, sizeof(*atid));
3328
3329 TAILQ_INIT(&atid->tid_q);
3330 TAILQ_INIT(&atid->filtq.tid_q);
3331 atid->tid = i;
3332 atid->an = an;
3333 for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3334 atid->tx_buf[j] = NULL;
3335 atid->baw_head = atid->baw_tail = 0;
3336 atid->paused = 0;
3337 atid->sched = 0;
3338 atid->hwq_depth = 0;
3339 atid->cleanup_inprogress = 0;
3340 if (i == IEEE80211_NONQOS_TID)
3341 atid->ac = ATH_NONQOS_TID_AC;
3342 else
3343 atid->ac = TID_TO_WME_AC(i);
3344 }
3345 an->clrdmask = 1; /* Always start by setting this bit */
3346}
3347
3348/*
3349 * Pause the current TID. This stops packets from being transmitted
3350 * on it.
3351 *
3352 * Since this is also called from upper layers as well as the driver,
3353 * it will get the TID lock.
3354 */
3355static void
3356ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3357{
3358
3360 tid->paused++;
3361 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3362 __func__,
3363 tid->an->an_node.ni_macaddr, ":",
3364 tid->tid,
3365 tid->paused);
3366}
3367
3368/*
3369 * Unpause the current TID, and schedule it if needed.
3370 */
3371static void
3372ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3373{
3375
3376 /*
3377 * There are some odd places where ath_tx_tid_resume() is called
3378 * when it shouldn't be; this works around that particular issue
3379 * until it's actually resolved.
3380 */
3381 if (tid->paused == 0) {
3382 device_printf(sc->sc_dev,
3383 "%s: [%6D]: tid=%d, paused=0?\n",
3384 __func__,
3385 tid->an->an_node.ni_macaddr, ":",
3386 tid->tid);
3387 } else {
3388 tid->paused--;
3389 }
3390
3391 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3392 "%s: [%6D]: tid=%d, unpaused = %d\n",
3393 __func__,
3394 tid->an->an_node.ni_macaddr, ":",
3395 tid->tid,
3396 tid->paused);
3397
3398 if (tid->paused)
3399 return;
3400
3401 /*
3402 * Override the clrdmask configuration for the next frame
3403 * from this TID, just to get the ball rolling.
3404 */
3405 ath_tx_set_clrdmask(sc, tid->an);
3406
3407 if (tid->axq_depth == 0)
3408 return;
3409
3410 /* XXX isfiltered shouldn't ever be 1 at this point */
3411 if (tid->isfiltered == 1) {
3412 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3413 __func__);
3414 return;
3415 }
3416
3417 ath_tx_tid_sched(sc, tid);
3418
3419 /*
3420 * Queue the software TX scheduler.
3421 */
3422 ath_tx_swq_kick(sc);
3423}
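Pause and resume form a reference count: the TID only becomes schedulable again once every ath_tx_tid_pause() has been matched by an ath_tx_tid_resume(). A trivial standalone sketch of that counting (not driver code; the two pausers are hypothetical):

#include <stdio.h>

int
main(void)
{
	int paused = 0;	/* models tid->paused */

	paused++;				/* hypothetical pauser: BAR pending */
	paused++;				/* hypothetical pauser: filtered frames */
	paused--;				/* BAR completed */
	printf("can tx: %d\n", paused == 0);	/* 0: still paused */
	paused--;				/* filtered state cleared */
	printf("can tx: %d\n", paused == 0);	/* 1: schedulable again */
	return (0);
}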
3424
3425/*
3426 * Add the given ath_buf to the TID filtered frame list.
3427 * This requires the TID be filtered.
3428 */
3429static void
3430ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3431 struct ath_buf *bf)
3432{
3433
3435
3436 if (!tid->isfiltered)
3437 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3438 __func__);
3439
3440 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3441
3442 /* Set the retry bit and bump the retry counter */
3443 ath_tx_set_retry(sc, bf);
3445
3446 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3447}
3448
3449/*
3450 * Handle a completed filtered frame from the given TID.
3451 * This just enables/pauses the filtered frame state if required
3452 * and appends the filtered frame to the filtered queue.
3453 */
3454static void
3455ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3456 struct ath_buf *bf)
3457{
3458
3460
3461 if (! tid->isfiltered) {
3462 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3463 __func__, tid->tid);
3464 tid->isfiltered = 1;
3465 ath_tx_tid_pause(sc, tid);
3466 }
3467
3468 /* Add the frame to the filter queue */
3469 ath_tx_tid_filt_addbuf(sc, tid, bf);
3470}
3471
3472/*
3473 * Complete the filtered frame TX completion.
3474 *
3475 * If there are no more frames in the hardware queue, unpause/unfilter
3476 * the TID if applicable. Otherwise we will wait for a node PS transition
3477 * to unfilter.
3478 */
3479static void
3481{
3482 struct ath_buf *bf;
3483 int do_resume = 0;
3484
3486
3487 if (tid->hwq_depth != 0)
3488 return;
3489
3490 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3491 __func__, tid->tid);
3492 if (tid->isfiltered == 1) {
3493 tid->isfiltered = 0;
3494 do_resume = 1;
3495 }
3496
3497 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3498 ath_tx_set_clrdmask(sc, tid->an);
3499
3500 /* XXX this is really quite inefficient */
3501 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3502 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3503 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3504 }
3505
3506 /* And only resume if we had paused before */
3507 if (do_resume)
3508 ath_tx_tid_resume(sc, tid);
3509}
3510
3511/*
3512 * Called when a single (aggregate or otherwise) frame is completed.
3513 *
3514 * Returns 0 if the buffer could be added to the filtered list
3515 * (cloned or otherwise), 1 if the buffer couldn't be added to the
3516 * filtered list (failed clone; expired retry) and the caller should
3517 * free it and handle it like a failure (eg by sending a BAR.)
3518 *
3519 * Since the buffer may be cloned, bf must not be touched after this
3520 * if the return value is 0.
3521 */
3522static int
3524 struct ath_buf *bf)
3525{
3526 struct ath_buf *nbf;
3527 int retval;
3528
3530
3531 /*
3532 * Don't allow a filtered frame to live forever.
3533 */
3536 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3537 "%s: bf=%p, seqno=%d, exceeded retries\n",
3538 __func__,
3539 bf,
3540 SEQNO(bf->bf_state.bfs_seqno));
3541 retval = 1; /* error */
3542 goto finish;
3543 }
3544
3545 /*
3546 * A busy buffer can't be added to the retry list.
3547 * It needs to be cloned.
3548 */
3549 if (bf->bf_flags & ATH_BUF_BUSY) {
3550 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3551 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3552 "%s: busy buffer clone: %p -> %p\n",
3553 __func__, bf, nbf);
3554 } else {
3555 nbf = bf;
3556 }
3557
3558 if (nbf == NULL) {
3559 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3560 "%s: busy buffer couldn't be cloned (%p)!\n",
3561 __func__, bf);
3562 retval = 1; /* error */
3563 } else {
3564 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3565 retval = 0; /* ok */
3566 }
3567finish:
3569
3570 return (retval);
3571}
3572
3573static void
3575 struct ath_buf *bf_first, ath_bufhead *bf_q)
3576{
3577 struct ath_buf *bf, *bf_next, *nbf;
3578
3580
3581 bf = bf_first;
3582 while (bf) {
3583 bf_next = bf->bf_next;
3584 bf->bf_next = NULL; /* Remove it from the aggr list */
3585
3586 /*
3587 * Don't allow a filtered frame to live forever.
3588 */
3591 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3592 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3593 __func__,
3594 tid->tid,
3595 bf,
3596 SEQNO(bf->bf_state.bfs_seqno));
3597 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3598 goto next;
3599 }
3600
3601 if (bf->bf_flags & ATH_BUF_BUSY) {
3602 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3603 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3604 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3605 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3606 } else {
3607 nbf = bf;
3608 }
3609
3610 /*
3611 * If the buffer couldn't be cloned, add it to bf_q;
3612 * the caller will free the buffer(s) as required.
3613 */
3614 if (nbf == NULL) {
3615 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3616 "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
3617 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3618 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3619 } else {
3620 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3621 }
3622next:
3623 bf = bf_next;
3624 }
3625
3627}
3628
3629/*
3630 * Suspend the queue because we need to TX a BAR.
3631 */
3632static void
3634{
3635
3637
3638 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3639 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3640 __func__,
3641 tid->tid,
3642 tid->bar_wait,
3643 tid->bar_tx);
3644
3645 /* We shouldn't be called when bar_tx is 1 */
3646 if (tid->bar_tx) {
3647 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3648 "%s: bar_tx is 1?!\n", __func__);
3649 }
3650
3651 /* If we've already been called, just be patient. */
3652 if (tid->bar_wait)
3653 return;
3654
3655 /* Wait! */
3656 tid->bar_wait = 1;
3657
3658 /* Only one pause, no matter how many frames fail */
3659 ath_tx_tid_pause(sc, tid);
3660}
3661
3662/*
3663 * We've finished with BAR handling - either we succeeded or
3664 * failed. Either way, unsuspend TX.
3665 */
3666static void
3667ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3668{
3669
3671
3672 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3673 "%s: %6D: TID=%d, called\n",
3674 __func__,
3675 tid->an->an_node.ni_macaddr,
3676 ":",
3677 tid->tid);
3678
3679 if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3680 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3681 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3682 __func__, tid->an->an_node.ni_macaddr, ":",
3683 tid->tid, tid->bar_tx, tid->bar_wait);
3684 }
3685
3686 tid->bar_tx = tid->bar_wait = 0;
3687 ath_tx_tid_resume(sc, tid);
3688}
3689
3690/*
3691 * Return whether we're ready to TX a BAR frame.
3692 *
3693 * Requires the TID lock be held.
3694 */
3695static int
3697{
3698
3700
3701 if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3702 return (0);
3703
3704 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3705 "%s: %6D: TID=%d, bar ready\n",
3706 __func__,
3707 tid->an->an_node.ni_macaddr,
3708 ":",
3709 tid->tid);
3710
3711 return (1);
3712}
3713
3714/*
3715 * Check whether the current TID is ready to have a BAR
3716 * TXed and if so, do the TX.
3717 *
3718 * Since the TID/TXQ lock can't be held during a call to
3719 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3720 * sending the BAR and locking it again.
3721 *
3722 * Eventually, the code to send the BAR should be broken out
3723 * from this routine so the lock doesn't have to be reacquired
3724 * just to be immediately dropped by the caller.
3725 */
3726static void
3727ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3728{
3729 struct ieee80211_tx_ampdu *tap;
3730
3732
3733 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3734 "%s: %6D: TID=%d, called\n",
3735 __func__,
3736 tid->an->an_node.ni_macaddr,
3737 ":",
3738 tid->tid);
3739
3740 tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3741
3742 /*
3743 * This is an error condition!
3744 */
3745 if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3746 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3747 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3748 __func__, tid->an->an_node.ni_macaddr, ":",
3749 tid->tid, tid->bar_tx, tid->bar_wait);
3750 return;
3751 }
3752
3753 /* Don't do anything if we still have pending frames */
3754 if (tid->hwq_depth > 0) {
3755 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3756 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3757 __func__,
3758 tid->an->an_node.ni_macaddr,
3759 ":",
3760 tid->tid,
3761 tid->hwq_depth);
3762 return;
3763 }
3764
3765 /* We're now about to TX */
3766 tid->bar_tx = 1;
3767
3768 /*
3769 * Override the clrdmask configuration for the next frame,
3770 * just to get the ball rolling.
3771 */
3772 ath_tx_set_clrdmask(sc, tid->an);
3773
3774 /*
3775 * Calculate new BAW left edge, now that all frames have either
3776 * succeeded or failed.
3777 *
3778 * XXX verify this is _actually_ the valid value to begin at!
3779 */
3780 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3781 "%s: %6D: TID=%d, new BAW left edge=%d\n",
3782 __func__,
3783 tid->an->an_node.ni_macaddr,
3784 ":",
3785 tid->tid,
3786 tap->txa_start);
3787
3788 /* Try sending the BAR frame */
3789 /* We can't hold the lock here! */
3790
3791 ATH_TX_UNLOCK(sc);
3792 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3793 /* Success? Now we wait for notification that it's done */
3794 ATH_TX_LOCK(sc);
3795 return;
3796 }
3797
3798 /* Failure? For now, warn loudly and continue */
3799 ATH_TX_LOCK(sc);
3800 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3801 "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3802 __func__, tid->an->an_node.ni_macaddr, ":",
3803 tid->tid);
3804 ath_tx_tid_bar_unsuspend(sc, tid);
3805}
3806
3807static void
3808ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3809 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3810{
3811
3813
3814 /*
3815 * If the current TID is running AMPDU, update
3816 * the BAW.
3817 */
3818 if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3819 bf->bf_state.bfs_dobaw) {
3820 /*
3821 * Only remove the frame from the BAW if it's
3822 * been transmitted at least once; this means
3823 * the frame was in the BAW to begin with.
3824 */
3825 if (bf->bf_state.bfs_retries > 0) {
3826 ath_tx_update_baw(sc, an, tid, bf);
3827 bf->bf_state.bfs_dobaw = 0;
3828 }
3829#if 0
3830 /*
3831 * This has become a non-fatal error now
3832 */
3833 if (! bf->bf_state.bfs_addedbaw)
3834 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3835 "%s: wasn't added: seqno %d\n",
3836 __func__, SEQNO(bf->bf_state.bfs_seqno));
3837#endif
3838 }
3839
3840 /* Strip it out of an aggregate list if it was in one */
3841 bf->bf_next = NULL;
3842
3843 /* Insert on the free queue to be freed by the caller */
3844 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3845}
3846
3847static void
3848ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3849 const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3850{
3851 struct ieee80211_node *ni = &an->an_node;
3852 struct ath_txq *txq;
3853 struct ieee80211_tx_ampdu *tap;
3854
3855 txq = sc->sc_ac2q[tid->ac];
3856 tap = ath_tx_get_tx_tid(an, tid->tid);
3857
3858 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3859 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3860 "seqno=%d, retry=%d\n",
3861 __func__,
3862 pfx,
3863 ni->ni_macaddr,
3864 ":",
3865 bf,
3866 bf->bf_state.bfs_addedbaw,
3867 bf->bf_state.bfs_dobaw,
3868 SEQNO(bf->bf_state.bfs_seqno),
3869 bf->bf_state.bfs_retries);
3870 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3871 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3872 __func__,
3873 pfx,
3874 ni->ni_macaddr,
3875 ":",
3876 bf,
3877 txq->axq_qnum,
3878 txq->axq_depth,
3879 txq->axq_aggr_depth);
3880 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3881 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3882 "isfiltered=%d\n",
3883 __func__,
3884 pfx,
3885 ni->ni_macaddr,
3886 ":",
3887 bf,
3888 tid->axq_depth,
3889 tid->hwq_depth,
3890 tid->bar_wait,
3891 tid->isfiltered);
3892 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3893 "%s: %s: %6D: tid %d: "
3894 "sched=%d, paused=%d, "
3895 "incomp=%d, baw_head=%d, "
3896 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3897 __func__,
3898 pfx,
3899 ni->ni_macaddr,
3900 ":",
3901 tid->tid,
3902 tid->sched, tid->paused,
3903 tid->incomp, tid->baw_head,
3904 tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3905 ni->ni_txseqs[tid->tid]);
3906
3907 /* XXX Dump the frame, see what it is? */
3908 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3909 ieee80211_dump_pkt(ni->ni_ic,
3910 mtod(bf->bf_m, const uint8_t *),
3911 bf->bf_m->m_len, 0, -1);
3912}
3913
3914/*
3915 * Free any packets currently pending in the software TX queue.
3916 *
3917 * This will be called when a node is being deleted.
3918 *
3919 * It can also be called on an active node during an interface
3920 * reset or state transition.
3921 *
3922 * (From Linux/reference):
3923 *
3924 * TODO: For frame(s) that are in the retry state, we will reuse the
3925 * sequence number(s) without setting the retry bit. The
3926 * alternative is to give up on these and BAR the receiver's window
3927 * forward.
3928 */
3929static void
3930ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3931 struct ath_tid *tid, ath_bufhead *bf_cq)
3932{
3933 struct ath_buf *bf;
3934 struct ieee80211_tx_ampdu *tap;
3935 struct ieee80211_node *ni = &an->an_node;
3936 int t;
3937
3938 tap = ath_tx_get_tx_tid(an, tid->tid);
3939
3940 ATH_TX_LOCK_ASSERT(sc);
3941
3942 /* Walk the queue, free frames */
3943 t = 0;
3944 for (;;) {
3945 bf = ATH_TID_FIRST(tid);
3946 if (bf == NULL) {
3947 break;
3948 }
3949
3950 if (t == 0) {
3951 ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3952// t = 1;
3953 }
3954
3955 ATH_TID_REMOVE(tid, bf, bf_list);
3956 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3957 }
3958
3959 /* And now, drain the filtered frame queue */
3960 t = 0;
3961 for (;;) {
3962 bf = ATH_TID_FILT_FIRST(tid);
3963 if (bf == NULL)
3964 break;
3965
3966 if (t == 0) {
3967 ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3968// t = 1;
3969 }
3970
3971 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3972 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3973 }
3974
3975 /*
3976 * Override the clrdmask configuration for the next frame
3977 * in case there is some future transmission, just to get
3978 * the ball rolling.
3979 *
3980 * This won't hurt things if the TID is about to be freed.
3981 */
3982 ath_tx_set_clrdmask(sc, tid->an);
3983
3984 /*
3985 * Now that it's completed, grab the TID lock and update
3986 * the sequence number and BAW window.
3987 * Because sequence numbers have been assigned to frames
3988 * that haven't been sent yet, it's entirely possible
3989 * we'll be called with some pending frames that have not
3990 * been transmitted.
3991 *
3992 * The cleaner solution is to do the sequence number allocation
3993 * when the packet is first transmitted - and thus the "retries"
3994 * check above would be enough to update the BAW/seqno.
3995 */
3996
3997 /* But don't do it for non-QoS TIDs */
3998 if (tap) {
3999#if 1
4000 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4001 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
4002 __func__,
4003 ni->ni_macaddr,
4004 ":",
4005 an,
4006 tid->tid,
4007 tap->txa_start);
4008#endif
4009 ni->ni_txseqs[tid->tid] = tap->txa_start;
4010 tid->baw_tail = tid->baw_head;
4011 }
4012}
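/*
 * Callers of ath_tx_tid_drain() (see ath_tx_node_flush() and
 * ath_tx_txq_drain() below) follow the same rough pattern: gather the
 * frames into a local ath_bufhead while the TX lock is held, then
 * complete them with ath_tx_default_comp() after the lock is dropped,
 * since completion may free the node.  Sketch:
 *
 *	TAILQ_INIT(&bf_cq);
 *	ATH_TX_LOCK(sc);
 *	ath_tx_tid_drain(sc, an, atid, &bf_cq);
 *	ATH_TX_UNLOCK(sc);
 *	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
 *		TAILQ_REMOVE(&bf_cq, bf, bf_list);
 *		ath_tx_default_comp(sc, bf, 0);
 *	}
 */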
4013
4014/*
4015 * Reset the TID state. This must only be called once the node has
4016 * had its frames flushed from this TID, to ensure that no other
4017 * pause / unpause logic can kick in.
4018 */
4019static void
4020ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
4021{
4022
4023#if 0
4024 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
4025 tid->paused = tid->sched = tid->addba_tx_pending = 0;
4026 tid->incomp = tid->cleanup_inprogress = 0;
4027#endif
4028
4029 /*
4030 * If we have a bar_wait set, we need to unpause the TID
4031 * here. Otherwise once cleanup has finished, the TID won't
4032 * have the right paused counter.
4033 *
4034 * XXX I'm not going through resume here - I don't want the
4035 * node to be rescheduled just yet. This however should be
4036 * methodized!
4037 */
4038 if (tid->bar_wait) {
4039 if (tid->paused > 0) {
4040 tid->paused --;
4041 }
4042 }
4043
4044 /*
4045 * XXX same with a currently filtered TID.
4046 *
4047 * Since this is being called during a flush, we assume that
4048 * the filtered frame list is actually empty.
4049 *
4050 * XXX TODO: add in a check to ensure that the filtered queue
4051 * depth is actually 0!
4052 */
4053 if (tid->isfiltered) {
4054 if (tid->paused > 0) {
4055 tid->paused --;
4056 }
4057 }
4058
4059 /*
4060 * Clear BAR, filtered frames, scheduled and ADDBA pending.
4061 * The TID may be going through cleanup from the last association
4062 * where things in the BAW are still in the hardware queue.
4063 */
4064 tid->bar_wait = 0;
4065 tid->bar_tx = 0;
4066 tid->isfiltered = 0;
4067 tid->sched = 0;
4068 tid->addba_tx_pending = 0;
4069
4070 /*
4071 * XXX TODO: it may just be enough to walk the HWQs and mark
4072 * frames for that node as non-aggregate; or mark the ath_node
4073 * with something that indicates that aggregation is no longer
4074 * occurring. Then we can just toss the BAW complaints and
4075 * do a complete hard reset of state here - no pause, no
4076 * complete counter, etc.
4077 */
4078
4079}
4080
4081/*
4082 * Flush all software queued packets for the given node.
4083 *
4084 * This occurs when a completion handler frees the last buffer
4085 * for a node, and the node is thus freed. This causes the node
4086 * to be cleaned up, which ends up calling ath_tx_node_flush.
4087 */
4088void
4089ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
4090{
4091 int tid;
4092 ath_bufhead bf_cq;
4093 struct ath_buf *bf;
4094
4095 TAILQ_INIT(&bf_cq);
4096
4097 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
4098 &an->an_node);
4099
4100 ATH_TX_LOCK(sc);
4101 DPRINTF(sc, ATH_DEBUG_NODE,
4102 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
4103 "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
4104 __func__,
4105 an->an_node.ni_macaddr,
4106 ":",
4107 an->an_is_powersave,
4108 an->an_stack_psq,
4109 an->an_tim_set,
4110 an->an_swq_depth,
4111 an->clrdmask,
4112 an->an_leak_count);
4113
4114 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4115 struct ath_tid *atid = &an->an_tid[tid];
4116
4117 /* Free packets */
4118 ath_tx_tid_drain(sc, an, atid, &bf_cq);
4119
4120 /* Remove this tid from the list of active tids */
4121 ath_tx_tid_unsched(sc, atid);
4122
4123 /* Reset the per-TID pause, BAR, etc state */
4124 ath_tx_tid_reset(sc, atid);
4125 }
4126
4127 /*
4128 * Clear global leak count
4129 */
4130 an->an_leak_count = 0;
4131 ATH_TX_UNLOCK(sc);
4132
4133 /* Handle completed frames */
4134 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4135 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4136 ath_tx_default_comp(sc, bf, 0);
4137 }
4138}
4139
4140/*
4141 * Drain all the software TXQs currently with traffic queued.
4142 */
4143void
4144ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4145{
4146 struct ath_tid *tid;
4147 ath_bufhead bf_cq;
4148 struct ath_buf *bf;
4149
4150 TAILQ_INIT(&bf_cq);
4151 ATH_TX_LOCK(sc);
4152
4153 /*
4154 * Iterate over all active tids for the given txq,
4155 * flushing and unsched'ing them
4156 */
4157 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4158 tid = TAILQ_FIRST(&txq->axq_tidq);
4159 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4160 ath_tx_tid_unsched(sc, tid);
4161 }
4162
4163 ATH_TX_UNLOCK(sc);
4164
4165 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4166 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4167 ath_tx_default_comp(sc, bf, 0);
4168 }
4169}
4170
4171/*
4172 * Handle completion of non-aggregate session frames.
4173 *
4174 * This (currently) doesn't implement software retransmission of
4175 * non-aggregate frames!
4176 *
4177 * Software retransmission of non-aggregate frames needs to obey
4178 * the strict sequence number ordering, and drop any frames that
4179 * will fail this.
4180 *
4181 * For now, filtered frames and frame transmission will cause
4182 * all kinds of issues. So we don't support them.
4183 *
4184 * So anyone queuing frames via ath_tx_normal_xmit() or
4185 * ath_tx_tid_hw_queue_norm() must override and set CLRDMASK.
4186 */
4187void
4188ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4189{
4190 struct ieee80211_node *ni = bf->bf_node;
4191 struct ath_node *an = ATH_NODE(ni);
4192 int tid = bf->bf_state.bfs_tid;
4193 struct ath_tid *atid = &an->an_tid[tid];
4194 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4195
4196 /* The TID state is protected behind the TXQ lock */
4197 ATH_TX_LOCK(sc);
4198
4199 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4200 __func__, bf, fail, atid->hwq_depth - 1);
4201
4202 atid->hwq_depth--;
4203
4204#if 0
4205 /*
4206 * If the frame was filtered, stick it on the filter frame
4207 * queue and complain about it. It shouldn't happen!
4208 */
4209 if ((ts->ts_status & HAL_TXERR_FILT) ||
4210 (ts->ts_status != 0 && atid->isfiltered)) {
4211 DPRINTF(sc, ATH_DEBUG_SW_TX,
4212 "%s: isfiltered=%d, ts_status=%d: huh?\n",
4213 __func__,
4214 atid->isfiltered,
4215 ts->ts_status);
4216 ath_tx_tid_filt_comp_buf(sc, atid, bf);
4217 }
4218#endif
4219 if (atid->isfiltered)
4220 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4221 if (atid->hwq_depth < 0)
4222 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4223 __func__, atid->hwq_depth);
4224
4225 /* If the TID is being cleaned up, track things */
4226 /* XXX refactor! */
4227 if (atid->cleanup_inprogress) {
4228 atid->incomp--;
4229 if (atid->incomp == 0) {
4230 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4231 "%s: TID %d: cleaned up! resume!\n",
4232 __func__, tid);
4233 atid->cleanup_inprogress = 0;
4234 ath_tx_tid_resume(sc, atid);
4235 }
4236 }
4237
4238 /*
4239 * If the queue is filtered, potentially mark it as complete
4240 * and reschedule it as needed.
4241 *
4242 * This is required as there may be a subsequent TX descriptor
4243 * for this end-node that has CLRDMASK set, so it's quite possible
4244 * that a filtered frame will be followed by a non-filtered
4245 * (complete or otherwise) frame.
4246 *
4247 * XXX should we do this before we complete the frame?
4248 */
4249 if (atid->isfiltered)
4250 ath_tx_tid_filt_comp_complete(sc, atid);
4251 ATH_TX_UNLOCK(sc);
4252
4253 /*
4254 * punt to rate control if we're not being cleaned up
4255 * during a hw queue drain and the frame wanted an ACK.
4256 */
4257 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4258 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4259 ts,
4260 bf->bf_state.bfs_pktlen,
4261 bf->bf_state.bfs_pktlen,
4262 1, (ts->ts_status == 0) ? 0 : 1);
4263
4264 ath_tx_default_comp(sc, bf, fail);
4265}
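/*
 * ath_tx_normal_comp() is the bf_comp handler installed on frames sent
 * via the non-aggregate path (ath_tx_tid_hw_queue_norm() below) and, it
 * appears, on frames downgraded to non-aggregate during TID cleanup
 * (ath_tx_tid_cleanup_frame() below).
 */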
4266
4267/*
4268 * Handle cleanup of aggregate session packets that aren't
4269 * an A-MPDU.
4270 *
4271 * There's no need to update the BAW here - the session is being
4272 * torn down.
4273 */
4274static void
4275ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4276{
4277 struct ieee80211_node *ni = bf->bf_node;
4278 struct ath_node *an = ATH_NODE(ni);
4279 int tid = bf->bf_state.bfs_tid;
4280 struct ath_tid *atid = &an->an_tid[tid];
4281
4282 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4283 __func__, tid, atid->incomp);
4284
4285 ATH_TX_LOCK(sc);
4286 atid->incomp--;
4287
4288 /* XXX refactor! */
4289 if (bf->bf_state.bfs_dobaw) {
4290 ath_tx_update_baw(sc, an, atid, bf);
4291 if (!bf->bf_state.bfs_addedbaw)
4292 DPRINTF(sc, ATH_DEBUG_SW_TX,
4293 "%s: wasn't added: seqno %d\n",
4294 __func__, SEQNO(bf->bf_state.bfs_seqno));
4295 }
4296
4297 if (atid->incomp == 0) {
4298 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4299 "%s: TID %d: cleaned up! resume!\n",
4300 __func__, tid);
4301 atid->cleanup_inprogress = 0;
4302 ath_tx_tid_resume(sc, atid);
4303 }
4304 ATH_TX_UNLOCK(sc);
4305
4306 ath_tx_default_comp(sc, bf, 0);
4307}
4308
4309/*
4310 * This as it currently stands is a bit dumb. Ideally we'd just
4311 * fail the frame the normal way and have it permanently fail
4312 * via the normal aggregate completion path.
4313 */
4314static void
4315ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4316 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
4317{
4318 struct ath_tid *atid = &an->an_tid[tid];
4319 struct ath_buf *bf, *bf_next;
4320
4321 ATH_TX_LOCK_ASSERT(sc);
4322
4323 /*
4324 * Remove this frame from the queue.
4325 */
4326 ATH_TID_REMOVE(atid, bf_head, bf_list);
4327
4328 /*
4329 * Loop over all the frames in the aggregate.
4330 */
4331 bf = bf_head;
4332 while (bf != NULL) {
4333 bf_next = bf->bf_next; /* next aggregate frame, or NULL */
4334
4335 /*
4336 * If it's been added to the BAW we need to kick
4337 * it out of the BAW before we continue.
4338 *
4339 * XXX if it's an aggregate, assert that it's in the
4340 * BAW - we shouldn't have it be in an aggregate
4341 * otherwise!
4342 */
4343 if (bf->bf_state.bfs_addedbaw) {
4344 ath_tx_update_baw(sc, an, atid, bf);
4345 bf->bf_state.bfs_dobaw = 0;
4346 }
4347
4348 /*
4349 * Give it the default completion handler.
4350 */
4351 bf->bf_comp = ath_tx_normal_comp;
4352 bf->bf_next = NULL;
4353
4354 /*
4355 * Add it to the list to free.
4356 */
4357 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4358
4359 /*
4360 * Now advance to the next frame in the aggregate.
4361 */
4362 bf = bf_next;
4363 }
4364}
4365
4366/*
4367 * Performs transmit side cleanup when TID changes from aggregated to
4368 * unaggregated and during reassociation.
4369 *
4370 * For now, this just tosses everything from the TID software queue
4371 * whether or not it has been retried and marks the TID as
4372 * pending completion if there's anything for this TID queued to
4373 * the hardware.
4374 *
4375 * The caller is responsible for pausing the TID and unpausing the
4376 * TID if no cleanup was required. Otherwise the cleanup path will
4377 * unpause the TID once the last hardware queued frame is completed.
4378 */
4379static void
4380ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4381 ath_bufhead *bf_cq)
4382{
4383 struct ath_tid *atid = &an->an_tid[tid];
4384 struct ath_buf *bf, *bf_next;
4385
4386 ATH_TX_LOCK_ASSERT(sc);
4387
4388 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4389 "%s: TID %d: called; inprogress=%d\n", __func__, tid,
4390 atid->cleanup_inprogress);
4391
4392 /*
4393 * Move the filtered frames to the TX queue, before
4394 * we run off and discard/process things.
4395 */
4396
4397 /* XXX this is really quite inefficient */
4398 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4399 ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4400 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4401 }
4402
4403 /*
4404 * Update the frames in the software TX queue:
4405 *
4406 * + Discard retry frames in the queue
4407 * + Fix the completion function to be non-aggregate
4408 */
4409 bf = ATH_TID_FIRST(atid);
4410 while (bf) {
4411 /*
4412 * Grab the next frame in the list, we may
4413 * be fiddling with the list.
4414 */
4415 bf_next = TAILQ_NEXT(bf, bf_list);
4416
4417 /*
4418 * Free the frame and all subframes.
4419 */
4420 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4421
4422 /*
4423 * Next frame!
4424 */
4425 bf = bf_next;
4426 }
4427
4428 /*
4429 * If there's anything in the hardware queue we wait
4430 * for the TID HWQ to empty.
4431 */
4432 if (atid->hwq_depth > 0) {
4433 /*
4434 * XXX how about we kill atid->incomp, and instead
4435 * replace it with a macro that checks that atid->hwq_depth
4436 * is 0?
4437 */
4438 atid->incomp = atid->hwq_depth;
4439 atid->cleanup_inprogress = 1;
4440 }
4441
4442 if (atid->cleanup_inprogress)
4443 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4444 "%s: TID %d: cleanup needed: %d packets\n",
4445 __func__, tid, atid->incomp);
4446
4447 /* Owner now must free completed frames */
4448}
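/*
 * A sketch of the expected caller pattern for the cleanup path described
 * above (the exact callers live elsewhere in this file, e.g. the ADDBA
 * teardown path): pause first, run the cleanup, and only resume
 * immediately if no cleanup ended up being required; otherwise the
 * completion path resumes the TID once hwq_depth drains to zero.
 *
 *	ATH_TX_LOCK(sc);
 *	ath_tx_tid_pause(sc, atid);
 *	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
 *	if (! atid->cleanup_inprogress)
 *		ath_tx_tid_resume(sc, atid);
 *	ATH_TX_UNLOCK(sc);
 */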
4449
4450static struct ath_buf *
4451ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4452 struct ath_tid *tid, struct ath_buf *bf)
4453{
4454 struct ath_buf *nbf;
4455 int error;
4456
4457 /*
4458 * Clone the buffer. This will handle the dma unmap and
4459 * copy the node reference to the new buffer. If this
4460 * works out, 'bf' will have no DMA mapping, no mbuf
4461 * pointer and no node reference.
4462 */
4463 nbf = ath_buf_clone(sc, bf);
4464
4465#if 0
4466 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4467 __func__);
4468#endif
4469
4470 if (nbf == NULL) {
4471 /* Failed to clone */
4472 DPRINTF(sc, ATH_DEBUG_XMIT,
4473 "%s: failed to clone a busy buffer\n",
4474 __func__);
4475 return NULL;
4476 }
4477
4478 /* Setup the dma for the new buffer */
4479 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4480 if (error != 0) {
4481 DPRINTF(sc, ATH_DEBUG_XMIT,
4482 "%s: failed to setup dma for clone\n",
4483 __func__);
4484 /*
4485 * Put this at the head of the list, not tail;
4486 * that way it doesn't interfere with the
4487 * busy buffer logic (which uses the tail of
4488 * the list.)
4489 */
4490 ATH_TXBUF_LOCK(sc);
4491 ath_returnbuf_head(sc, nbf);
4492 ATH_TXBUF_UNLOCK(sc);
4493 return NULL;
4494 }
4495
4496 /* Update BAW if required, before we free the original buf */
4497 if (bf->bf_state.bfs_dobaw)
4498 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4499
4500 /* Free original buffer; return new buffer */
4501 ath_freebuf(sc, bf);
4502
4503 return nbf;
4504}
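/*
 * The retry paths below use this helper in the same way: a buffer that
 * is still marked ATH_BUF_BUSY can't be re-queued directly, so it is
 * cloned and the clone retried instead.  Roughly:
 *
 *	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
 *	    (bf->bf_flags & ATH_BUF_BUSY)) {
 *		nbf = ath_tx_retry_clone(sc, an, atid, bf);
 *		if (nbf != NULL)
 *			bf = nbf;	(the original bf has been freed)
 *		else
 *			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
 *	}
 */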
4505
4506/*
4507 * Handle retrying an unaggregate frame in an aggregate
4508 * session.
4509 *
4510 * If too many retries occur, pause the TID, wait for
4511 * any further retransmits (as there's no requirement that
4512 * non-aggregate frames in an aggregate session be
4513 * transmitted in-order; they just have to be in-BAW)
4514 * and then queue a BAR.
4515 */
4516static void
4517ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4518{
4519 struct ieee80211_node *ni = bf->bf_node;
4520 struct ath_node *an = ATH_NODE(ni);
4521 int tid = bf->bf_state.bfs_tid;
4522 struct ath_tid *atid = &an->an_tid[tid];
4523 struct ieee80211_tx_ampdu *tap;
4524
4525 ATH_TX_LOCK(sc);
4526
4527 tap = ath_tx_get_tx_tid(an, tid);
4528
4529 /*
4530 * If the buffer is marked as busy, we can't directly
4531 * reuse it. Instead, try to clone the buffer.
4532 * If the clone is successful, recycle the old buffer.
4533 * If the clone is unsuccessful, set bfs_retries to max
4534 * to force the next bit of code to free the buffer
4535 * for us.
4536 */
4537 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4538 (bf->bf_flags & ATH_BUF_BUSY)) {
4539 struct ath_buf *nbf;
4540 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4541 if (nbf)
4542 /* bf has been freed at this point */
4543 bf = nbf;
4544 else
4545 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4546 }
4547
4548 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4549 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4550 "%s: exceeded retries; seqno %d\n",
4551 __func__, SEQNO(bf->bf_state.bfs_seqno));
4552 sc->sc_stats.ast_tx_swretrymax++;
4553
4554 /* Update BAW anyway */
4555 if (bf->bf_state.bfs_dobaw) {
4556 ath_tx_update_baw(sc, an, atid, bf);
4557 if (! bf->bf_state.bfs_addedbaw)
4558 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4559 "%s: wasn't added: seqno %d\n",
4560 __func__, SEQNO(bf->bf_state.bfs_seqno));
4561 }
4562 bf->bf_state.bfs_dobaw = 0;
4563
4564 /* Suspend the TX queue and get ready to send the BAR */
4565 ath_tx_tid_bar_suspend(sc, atid);
4566
4567 /* Send the BAR if there are no other frames waiting */
4568 if (ath_tx_tid_bar_tx_ready(sc, atid))
4569 ath_tx_tid_bar_tx(sc, atid);
4570
4571 ATH_TX_UNLOCK(sc);
4572
4573 /* Free buffer, bf is free after this call */
4574 ath_tx_default_comp(sc, bf, 0);
4575 return;
4576 }
4577
4578 /*
4579 * This increments the retry counter as well as
4580 * sets the retry flag in the ath_buf and packet
4581 * body.
4582 */
4583 ath_tx_set_retry(sc, bf);
4584 sc->sc_stats.ast_tx_swretries++;
4585
4586 /*
4587 * Insert this at the head of the queue, so it's
4588 * retried before any current/subsequent frames.
4589 */
4590 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4591 ath_tx_tid_sched(sc, atid);
4592 /* Send the BAR if there are no other frames waiting */
4593 if (ath_tx_tid_bar_tx_ready(sc, atid))
4594 ath_tx_tid_bar_tx(sc, atid);
4595
4596 ATH_TX_UNLOCK(sc);
4597}
4598
4599/*
4600 * Common code for aggregate excessive retry/subframe retry.
4601 * If retrying, queues buffers to bf_q. If not, frees the
4602 * buffers.
4603 *
4604 * XXX should unify this with ath_tx_aggr_retry_unaggr()
4605 */
4606static int
4607ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4608 ath_bufhead *bf_q)
4609{
4610 struct ieee80211_node *ni = bf->bf_node;
4611 struct ath_node *an = ATH_NODE(ni);
4612 int tid = bf->bf_state.bfs_tid;
4613 struct ath_tid *atid = &an->an_tid[tid];
4614
4615 ATH_TX_LOCK_ASSERT(sc);
4616
4617 /* XXX clr11naggr should be done for all subframes */
4618 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4619 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4620
4621 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4622
4623 /*
4624 * If the buffer is marked as busy, we can't directly
4625 * reuse it. Instead, try to clone the buffer.
4626 * If the clone is successful, recycle the old buffer.
4627 * If the clone is unsuccessful, set bfs_retries to max
4628 * to force the next bit of code to free the buffer
4629 * for us.
4630 */
4631 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4632 (bf->bf_flags & ATH_BUF_BUSY)) {
4633 struct ath_buf *nbf;
4634 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4635 if (nbf)
4636 /* bf has been freed at this point */
4637 bf = nbf;
4638 else
4639 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4640 }
4641
4642 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4643 sc->sc_stats.ast_tx_swretrymax++;
4644 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4645 "%s: max retries: seqno %d\n",
4646 __func__, SEQNO(bf->bf_state.bfs_seqno));
4647 ath_tx_update_baw(sc, an, atid, bf);
4648 if (!bf->bf_state.bfs_addedbaw)
4649 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4650 "%s: wasn't added: seqno %d\n",
4651 __func__, SEQNO(bf->bf_state.bfs_seqno));
4652 bf->bf_state.bfs_dobaw = 0;
4653 return 1;
4654 }
4655
4656 ath_tx_set_retry(sc, bf);
4657 sc->sc_stats.ast_tx_swretries++;
4658 bf->bf_next = NULL; /* Just to make sure */
4659
4660 /* Clear the aggregate state */
4661 bf->bf_state.bfs_aggr = 0;
4662 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */
4663 bf->bf_state.bfs_nframes = 1;
4664
4665 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4666 return 0;
4667}
4668
4669/*
4670 * error pkt completion for an aggregate destination
4671 */
4672static void
4673ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4674 struct ath_tid *tid)
4675{
4676 struct ieee80211_node *ni = bf_first->bf_node;
4677 struct ath_node *an = ATH_NODE(ni);
4678 struct ath_buf *bf_next, *bf;
4679 ath_bufhead bf_q;
4680 int drops = 0;
4681 struct ieee80211_tx_ampdu *tap;
4682 ath_bufhead bf_cq;
4683
4684 TAILQ_INIT(&bf_q);
4685 TAILQ_INIT(&bf_cq);
4686
4687 /*
4688 * Update rate control - all frames have failed.
4689 */
4690 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4691 &bf_first->bf_status.ds_txstat,
4692 bf_first->bf_state.bfs_al,
4693 bf_first->bf_state.bfs_rc_maxpktlen,
4694 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4695
4696 ATH_TX_LOCK(sc);
4697 tap = ath_tx_get_tx_tid(an, tid->tid);
4698 sc->sc_stats.ast_tx_aggr_failall++;
4699
4700 /* Retry all subframes */
4701 bf = bf_first;
4702 while (bf) {
4703 bf_next = bf->bf_next;
4704 bf->bf_next = NULL; /* Remove it from the aggr list */
4705 sc->sc_stats.ast_tx_aggr_fail++;
4706 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4707 drops++;
4708 bf->bf_next = NULL;
4709 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4710 }
4711 bf = bf_next;
4712 }
4713
4714 /* Prepend all frames to the beginning of the queue */
4715 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4716 TAILQ_REMOVE(&bf_q, bf, bf_list);
4717 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4718 }
4719
4720 /*
4721 * Schedule the TID to be re-tried.
4722 */
4723 ath_tx_tid_sched(sc, tid);
4724
4725 /*
4726 * send bar if we dropped any frames
4727 *
4728 * Keep the txq lock held for now, as we need to ensure
4729 * that ni_txseqs[] is consistent (as it's being updated
4730 * in the ifnet TX context or raw TX context.)
4731 */
4732 if (drops) {
4733 /* Suspend the TX queue and get ready to send the BAR */
4734 ath_tx_tid_bar_suspend(sc, tid);
4735 }
4736
4737 /*
4738 * Send BAR if required
4739 */
4740 if (ath_tx_tid_bar_tx_ready(sc, tid))
4741 ath_tx_tid_bar_tx(sc, tid);
4742
4743 ATH_TX_UNLOCK(sc);
4744
4745 /* Complete frames which errored out */
4746 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4747 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4748 ath_tx_default_comp(sc, bf, 0);
4749 }
4750}
4751
4752/*
4753 * Handle clean-up of packets from an aggregate list.
4754 *
4755 * There's no need to update the BAW here - the session is being
4756 * torn down.
4757 */
4758static void
4759ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4760{
4761 struct ath_buf *bf, *bf_next;
4762 struct ieee80211_node *ni = bf_first->bf_node;
4763 struct ath_node *an = ATH_NODE(ni);
4764 int tid = bf_first->bf_state.bfs_tid;
4765 struct ath_tid *atid = &an->an_tid[tid];
4766
4767 ATH_TX_LOCK(sc);
4768
4769 /* update incomp */
4770 atid->incomp--;
4771
4772 /* Update the BAW */
4773 bf = bf_first;
4774 while (bf) {
4775 /* XXX refactor! */
4776 if (bf->bf_state.bfs_dobaw) {
4777 ath_tx_update_baw(sc, an, atid, bf);
4778 if (!bf->bf_state.bfs_addedbaw)
4779 DPRINTF(sc, ATH_DEBUG_SW_TX,
4780 "%s: wasn't added: seqno %d\n",
4781 __func__, SEQNO(bf->bf_state.bfs_seqno));
4782 }
4783 bf = bf->bf_next;
4784 }
4785
4786 if (atid->incomp == 0) {
4787 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4788 "%s: TID %d: cleaned up! resume!\n",
4789 __func__, tid);
4790 atid->cleanup_inprogress = 0;
4791 ath_tx_tid_resume(sc, atid);
4792 }
4793
4794 /* Send BAR if required */
4795 /* XXX why would we send a BAR when transitioning to non-aggregation? */
4796 /*
4797 * XXX TODO: we should likely just tear down the BAR state here,
4798 * rather than sending a BAR.
4799 */
4800 if (ath_tx_tid_bar_tx_ready(sc, atid))
4801 ath_tx_tid_bar_tx(sc, atid);
4802
4803 ATH_TX_UNLOCK(sc);
4804
4805 /* Handle frame completion as individual frames */
4806 bf = bf_first;
4807 while (bf) {
4808 bf_next = bf->bf_next;
4809 bf->bf_next = NULL;
4810 ath_tx_default_comp(sc, bf, 1);
4811 bf = bf_next;
4812 }
4813}
4814
4815/*
4816 * Handle completion of a set of aggregate frames.
4817 *
4818 * Note: the completion handler is called for the last descriptor in the
4819 * aggregate, not for the last descriptor in the first frame.
4820 */
4821static void
4822ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4823 int fail)
4824{
4825 //struct ath_desc *ds = bf->bf_lastds;
4826 struct ieee80211_node *ni = bf_first->bf_node;
4827 struct ath_node *an = ATH_NODE(ni);
4828 int tid = bf_first->bf_state.bfs_tid;
4829 struct ath_tid *atid = &an->an_tid[tid];
4830 struct ath_tx_status ts;
4831 struct ieee80211_tx_ampdu *tap;
4832 ath_bufhead bf_q;
4833 ath_bufhead bf_cq;
4834 int seq_st, tx_ok;
4835 int hasba, isaggr;
4836 uint32_t ba[2];
4837 struct ath_buf *bf, *bf_next;
4838 int ba_index;
4839 int drops = 0;
4840 int nframes = 0, nbad = 0, nf;
4841 int pktlen;
4842 int agglen, rc_agglen;
4843 /* XXX there's too much on the stack? */
4844 struct ath_rc_series rc[ATH_RC_NUM];
4845 int txseq;
4846
4847 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4848 __func__, atid->hwq_depth);
4849
4850 /*
4851 * Take a copy; this may be needed -after- bf_first
4852 * has been completed and freed.
4853 */
4854 ts = bf_first->bf_status.ds_txstat;
4855 agglen = bf_first->bf_state.bfs_al;
4856 rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen;
4857
4858 TAILQ_INIT(&bf_q);
4859 TAILQ_INIT(&bf_cq);
4860
4861 /* The TID state is kept behind the TXQ lock */
4862 ATH_TX_LOCK(sc);
4863
4864 atid->hwq_depth--;
4865 if (atid->hwq_depth < 0)
4866 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4867 __func__, atid->hwq_depth);
4868
4869 /*
4870 * If the TID is filtered, handle completing the filter
4871 * transition before potentially kicking it to the cleanup
4872 * function.
4873 *
4874 * XXX this is duplicate work, ew.
4875 */
4876 if (atid->isfiltered)
4877 ath_tx_tid_filt_comp_complete(sc, atid);
4878
4879 /*
4880 * Punt cleanup to the relevant function, not our problem now
4881 */
4882 if (atid->cleanup_inprogress) {
4883 if (atid->isfiltered)
4884 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4885 "%s: isfiltered=1, normal_comp?\n",
4886 __func__);
4887 ATH_TX_UNLOCK(sc);
4888 ath_tx_comp_cleanup_aggr(sc, bf_first);
4889 return;
4890 }
4891
4892 /*
4893 * If the frame is filtered, transition to filtered frame
4894 * mode and add this to the filtered frame list.
4895 *
4896 * XXX TODO: figure out how this interoperates with
4897 * BAR, pause and cleanup states.
4898 */
4899 if ((ts.ts_status & HAL_TXERR_FILT) ||
4900 (ts.ts_status != 0 && atid->isfiltered)) {
4901 if (fail != 0)
4902 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4903 "%s: isfiltered=1, fail=%d\n", __func__, fail);
4904 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4905
4906 /* Remove from BAW */
4907 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4908 if (bf->bf_state.bfs_addedbaw)
4909 drops++;
4910 if (bf->bf_state.bfs_dobaw) {
4911 ath_tx_update_baw(sc, an, atid, bf);
4912 if (!bf->bf_state.bfs_addedbaw)
4913 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4914 "%s: wasn't added: seqno %d\n",
4915 __func__,
4916 SEQNO(bf->bf_state.bfs_seqno));
4917 }
4918 bf->bf_state.bfs_dobaw = 0;
4919 }
4920 /*
4921 * If any intermediate frames in the BAW were dropped when
4922 * handling filtering things, send a BAR.
4923 */
4924 if (drops)
4925 ath_tx_tid_bar_suspend(sc, atid);
4926
4927 /*
4928 * Finish up by sending a BAR if required and freeing
4929 * the frames outside of the TX lock.
4930 */
4931 goto finish_send_bar;
4932 }
4933
4934 /*
4935 * XXX for now, use the first frame in the aggregate for
4936 * XXX rate control completion; it's at least consistent.
4937 */
4938 pktlen = bf_first->bf_state.bfs_pktlen;
4939
4940 /*
4941 * Handle errors first!
4942 *
4943 * Here, handle _any_ error as an "exceeded retries" error.
4944 * Later on (when filtered frames are to be specially handled)
4945 * it'll have to be expanded.
4946 */
4947#if 0
4948 if (ts.ts_status & HAL_TXERR_XRETRY) {
4949#endif
4950 if (ts.ts_status != 0) {
4951 ATH_TX_UNLOCK(sc);
4952 ath_tx_comp_aggr_error(sc, bf_first, atid);
4953 return;
4954 }
4955
4956 tap = ath_tx_get_tx_tid(an, tid);
4957
4958 /*
4959 * extract starting sequence and block-ack bitmap
4960 */
4961 /* XXX endian-ness of seq_st, ba? */
4962 seq_st = ts.ts_seqnum;
4963 hasba = !! (ts.ts_flags & HAL_TX_BA);
4964 tx_ok = (ts.ts_status == 0);
4965 isaggr = bf_first->bf_state.bfs_aggr;
4966 ba[0] = ts.ts_ba_low;
4967 ba[1] = ts.ts_ba_high;
4968
4969 /*
4970 * Copy the TX completion status and the rate control
4971 * series from the first descriptor, as it may be freed
4972 * before the rate control code can get its grubby fingers
4973 * into things.
4974 */
4975 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4976
4977 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4978 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4979 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4980 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4981 isaggr, seq_st, hasba, ba[0], ba[1]);
4982
4983 /*
4984 * The reference driver doesn't do this; it simply ignores
4985 * this check in its entirety.
4986 *
4987 * I've seen this occur when using iperf to send traffic
4988 * out tid 1 - the aggregate frames are all marked as TID 1,
4989 * but the TXSTATUS has TID=0. So, let's just ignore this
4990 * check.
4991 */
4992#if 0
4993 /* Occasionally, the MAC sends a tx status for the wrong TID. */
4994 if (tid != ts.ts_tid) {
4995 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4996 __func__, tid, ts.ts_tid);
4997 tx_ok = 0;
4998 }
4999#endif
5000
5001 /* AR5416 BA bug; this requires an interface reset */
5002 if (isaggr && tx_ok && (! hasba)) {
5003 device_printf(sc->sc_dev,
5004 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
5005 "seq_st=%d\n",
5006 __func__, hasba, tx_ok, isaggr, seq_st);
5007 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
5008 /* And as we can't really trust the BA here .. */
5009 ba[0] = 0;
5010 ba[1] = 0;
5011 seq_st = 0;
5012#ifdef ATH_DEBUG
5013 ath_printtxbuf(sc, bf_first,
5014 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
5015#endif
5016 }
5017
5018 /*
5019 * Walk the list of frames, figure out which ones were correctly
5020 * sent and which weren't.
5021 */
5022 bf = bf_first;
5023 nf = bf_first->bf_state.bfs_nframes;
5024
5025 /* bf_first is going to be invalid once this list is walked */
5026 bf_first = NULL;
5027
5028 /*
5029 * Walk the list of completed frames and determine
5030 * which need to be completed and which need to be
5031 * retransmitted.
5032 *
5033 * For completed frames, the completion functions need
5034 * to be called at the end of this function as the last
5035 * node reference may free the node.
5036 *
5037 * Finally, since the TXQ lock can't be held during the
5038 * completion callback (to avoid lock recursion),
5039 * the completion calls have to be done outside of the
5040 * lock.
5041 */
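 /*
  * The per-subframe block-ack test below works roughly as follows
  * (sketch; see the ATH_BA_INDEX() / ATH_BA_ISSET() macros): the index
  * is the distance of the subframe's sequence number from the BA
  * starting sequence number, modulo the 802.11 sequence space,
  * approximately
  *
  *	index = (SEQNO(bf->bf_state.bfs_seqno) - seq_st) &
  *	    (IEEE80211_SEQ_RANGE - 1);
  *
  * and a set bit at that index in ba[] means the receiver acknowledged
  * that subframe.
  */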
5042 while (bf) {
5043 nframes++;
5044 ba_index = ATH_BA_INDEX(seq_st,
5045 SEQNO(bf->bf_state.bfs_seqno));
5046 bf_next = bf->bf_next;
5047 bf->bf_next = NULL; /* Remove it from the aggr list */
5048
5049 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5050 "%s: checking bf=%p seqno=%d; ack=%d\n",
5051 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
5052 ATH_BA_ISSET(ba, ba_index));
5053
5054 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
5055 sc->sc_stats.ast_tx_aggr_ok++;
5056 ath_tx_update_baw(sc, an, atid, bf);
5057 bf->bf_state.bfs_dobaw = 0;
5058 if (!bf->bf_state.bfs_addedbaw)
5059 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5060 "%s: wasn't added: seqno %d\n",
5061 __func__, SEQNO(bf->bf_state.bfs_seqno));
5062 bf->bf_next = NULL;
5063 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5064 } else {
5065 sc->sc_stats.ast_tx_aggr_fail++;
5066 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
5067 drops++;
5068 bf->bf_next = NULL;
5069 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5070 }
5071 nbad++;
5072 }
5073 bf = bf_next;
5074 }
5075
5076 /*
5077 * Now that the BAW updates have been done, unlock
5078 *
5079 * txseq is grabbed before the lock is released so we
5080 * have a consistent view of what -was- in the BAW.
5081 * Anything after this point will not yet have been
5082 * TXed.
5083 */
5084 txseq = tap->txa_start;
5085 ATH_TX_UNLOCK(sc);
5086
5087 if (nframes != nf)
5088 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5089 "%s: num frames seen=%d; bf nframes=%d\n",
5090 __func__, nframes, nf);
5091
5092 /*
5093 * Now we know how many frames were bad, call the rate
5094 * control code.
5095 */
5096 if (fail == 0) {
5097 ath_tx_update_ratectrl(sc, ni, rc, &ts, agglen, rc_agglen,
5098 nframes, nbad);
5099 }
5100
5101 /*
5102 * send bar if we dropped any frames
5103 */
5104 if (drops) {
5105 /* Suspend the TX queue and get ready to send the BAR */
5106 ATH_TX_LOCK(sc);
5107 ath_tx_tid_bar_suspend(sc, atid);
5108 ATH_TX_UNLOCK(sc);
5109 }
5110
5111 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5112 "%s: txa_start now %d\n", __func__, tap->txa_start);
5113
5114 ATH_TX_LOCK(sc);
5115
5116 /* Prepend all frames to the beginning of the queue */
5117 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
5118 TAILQ_REMOVE(&bf_q, bf, bf_list);
5119 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
5120 }
5121
5122 /*
5123 * Reschedule to grab some further frames.
5124 */
5125 ath_tx_tid_sched(sc, atid);
5126
5127 /*
5128 * If the queue is filtered, re-schedule as required.
5129 *
5130 * This is required as there may be a subsequent TX descriptor
5131 * for this end-node that has CLRDMASK set, so it's quite possible
5132 * that a filtered frame will be followed by a non-filtered
5133 * (complete or otherwise) frame.
5134 *
5135 * XXX should we do this before we complete the frame?
5136 */
5137 if (atid->isfiltered)
5138 ath_tx_tid_filt_comp_complete(sc, atid);
5139
5140finish_send_bar:
5141
5142 /*
5143 * Send BAR if required
5144 */
5145 if (ath_tx_tid_bar_tx_ready(sc, atid))
5146 ath_tx_tid_bar_tx(sc, atid);
5147
5148 ATH_TX_UNLOCK(sc);
5149
5150 /* Do deferred completion */
5151 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5152 TAILQ_REMOVE(&bf_cq, bf, bf_list);
5153 ath_tx_default_comp(sc, bf, 0);
5154 }
5155}
5156
5157/*
5158 * Handle completion of unaggregated frames in an ADDBA
5159 * session.
5160 *
5161 * Fail is set to 1 if the entry is being freed via a call to
5162 * ath_tx_draintxq().
5163 */
5164static void
5165ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5166{
5167 struct ieee80211_node *ni = bf->bf_node;
5168 struct ath_node *an = ATH_NODE(ni);
5169 int tid = bf->bf_state.bfs_tid;
5170 struct ath_tid *atid = &an->an_tid[tid];
5171 struct ath_tx_status ts;
5172 int drops = 0;
5173
5174 /*
5175 * Take a copy of this; filtering/cloning the frame may free the
5176 * bf pointer.
5177 */
5178 ts = bf->bf_status.ds_txstat;
5179
5180 /*
5181 * Update rate control status here, before we possibly
5182 * punt to retry or cleanup.
5183 *
5184 * Do it outside of the TXQ lock.
5185 */
5186 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5187 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5188 &bf->bf_status.ds_txstat,
5189 bf->bf_state.bfs_pktlen,
5190 bf->bf_state.bfs_pktlen,
5191 1, (ts.ts_status == 0) ? 0 : 1);
5192
5193 /*
5194 * This is called early so atid->hwq_depth can be tracked.
5195 * This unfortunately means that it's released and regrabbed
5196 * during retry and cleanup. That's rather inefficient.
5197 */
5198 ATH_TX_LOCK(sc);
5199
5200 if (tid == IEEE80211_NONQOS_TID)
5201 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5202
5203 DPRINTF(sc, ATH_DEBUG_SW_TX,
5204 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5205 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5206 SEQNO(bf->bf_state.bfs_seqno));
5207
5208 atid->hwq_depth--;
5209 if (atid->hwq_depth < 0)
5210 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5211 __func__, atid->hwq_depth);
5212
5213 /*
5214 * If the TID is filtered, handle completing the filter
5215 * transition before potentially kicking it to the cleanup
5216 * function.
5217 */
5218 if (atid->isfiltered)
5219 ath_tx_tid_filt_comp_complete(sc, atid);
5220
5221 /*
5222 * If a cleanup is in progress, punt to comp_cleanup
5223 * rather than handling it here. It's thus their
5224 * responsibility to clean up, call the completion
5225 * function in net80211, etc.
5226 */
5227 if (atid->cleanup_inprogress) {
5228 if (atid->isfiltered)
5229 DPRINTF(sc, ATH_DEBUG_SW_TX,
5230 "%s: isfiltered=1, normal_comp?\n",
5231 __func__);
5232 ATH_TX_UNLOCK(sc);
5233 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5234 __func__);
5235 ath_tx_comp_cleanup_unaggr(sc, bf);
5236 return;
5237 }
5238
5239 /*
5240 * XXX TODO: how does cleanup, BAR and filtered frame handling
5241 * overlap?
5242 *
5243 * If the frame is filtered OR if it's any failure but
5244 * the TID is filtered, the frame must be added to the
5245 * filtered frame list.
5246 *
5247 * However - a busy buffer can't be added to the filtered
5248 * list as it will end up being recycled without having
5249 * been made available for the hardware.
5250 */
5251 if ((ts.ts_status & HAL_TXERR_FILT) ||
5252 (ts.ts_status != 0 && atid->isfiltered)) {
5253 int freeframe;
5254
5255 if (fail != 0)
5256 DPRINTF(sc, ATH_DEBUG_SW_TX,
5257 "%s: isfiltered=1, fail=%d\n",
5258 __func__, fail);
5259 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5260 /*
5261 * If freeframe=0 then bf is no longer ours; don't
5262 * touch it.
5263 */
5264 if (freeframe) {
5265 /* Remove from BAW */
5266 if (bf->bf_state.bfs_addedbaw)
5267 drops++;
5268 if (bf->bf_state.bfs_dobaw) {
5269 ath_tx_update_baw(sc, an, atid, bf);
5270 if (!bf->bf_state.bfs_addedbaw)
5271 DPRINTF(sc, ATH_DEBUG_SW_TX,
5272 "%s: wasn't added: seqno %d\n",
5273 __func__, SEQNO(bf->bf_state.bfs_seqno));
5274 }
5275 bf->bf_state.bfs_dobaw = 0;
5276 }
5277
5278 /*
5279 * If the frame couldn't be filtered, treat it as a drop and
5280 * prepare to send a BAR.
5281 */
5282 if (freeframe && drops)
5283 ath_tx_tid_bar_suspend(sc, atid);
5284
5285 /*
5286 * Send BAR if required
5287 */
5288 if (ath_tx_tid_bar_tx_ready(sc, atid))
5289 ath_tx_tid_bar_tx(sc, atid);
5290
5291 ATH_TX_UNLOCK(sc);
5292 /*
5293 * If freeframe is set, then the frame couldn't be
5294 * cloned and bf is still valid. Just complete/free it.
5295 */
5296 if (freeframe)
5297 ath_tx_default_comp(sc, bf, fail);
5298
5299 return;
5300 }
5301 /*
5302 * Don't bother with the retry check if all frames
5303 * are being failed (eg during queue deletion.)
5304 */
5305#if 0
5306 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5307#endif
5308 if (fail == 0 && ts.ts_status != 0) {
5309 ATH_TX_UNLOCK(sc);
5310 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5311 __func__);
5312 ath_tx_aggr_retry_unaggr(sc, bf);
5313 return;
5314 }
5315
5316 /* Success? Complete */
5317 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5318 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5319 if (bf->bf_state.bfs_dobaw) {
5320 ath_tx_update_baw(sc, an, atid, bf);
5321 bf->bf_state.bfs_dobaw = 0;
5322 if (!bf->bf_state.bfs_addedbaw)
5323 DPRINTF(sc, ATH_DEBUG_SW_TX,
5324 "%s: wasn't added: seqno %d\n",
5325 __func__, SEQNO(bf->bf_state.bfs_seqno));
5326 }
5327
5328 /*
5329 * If the queue is filtered, re-schedule as required.
5330 *
5331 * This is required as there may be a subsequent TX descriptor
5332 * for this end-node that has CLRDMASK set, so it's quite possible
5333 * that a filtered frame will be followed by a non-filtered
5334 * (complete or otherwise) frame.
5335 *
5336 * XXX should we do this before we complete the frame?
5337 */
5338 if (atid->isfiltered)
5339 ath_tx_tid_filt_comp_complete(sc, atid);
5340
5341 /*
5342 * Send BAR if required
5343 */
5344 if (ath_tx_tid_bar_tx_ready(sc, atid))
5345 ath_tx_tid_bar_tx(sc, atid);
5346
5347 ATH_TX_UNLOCK(sc);
5348
5349 ath_tx_default_comp(sc, bf, fail);
5350 /* bf is freed at this point */
5351}
5352
5353void
5354ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5355{
5356 if (bf->bf_state.bfs_aggr)
5357 ath_tx_aggr_comp_aggr(sc, bf, fail);
5358 else
5359 ath_tx_aggr_comp_unaggr(sc, bf, fail);
5360}
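/*
 * ath_tx_aggr_comp() is the bf_comp handler hung off frames queued via
 * the aggregate path (ath_tx_tid_hw_queue_aggr() below); it simply
 * dispatches on whether the buffer went out as a multi-frame aggregate
 * or as a lone frame within an aggregation session.
 */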
5361
5362/*
5363 * Grab the software queue depth (in bytes) that we COULD transmit.
5364 *
5365 * This includes checking whether each frame is within the BAW and whether
5366 * it is supposed to be in the BAW at all. Other checks could be done;
5367 * but for now let's try and avoid doing the whole of ath_tx_form_aggr()
5368 * here.
5369 */
5370static int
5371ath_tx_tid_swq_depth_bytes(struct ath_softc *sc, struct ath_node *an,
5372 struct ath_tid *tid)
5373{
5374 struct ath_buf *bf;
5375 struct ieee80211_tx_ampdu *tap;
5376 int nbytes = 0;
5377
5378 ATH_TX_LOCK_ASSERT(sc);
5379
5380 tap = ath_tx_get_tx_tid(an, tid->tid);
5381
5382 /*
5383 * Iterate over each buffer and sum the pkt_len.
5384 * Bail if we exceed ATH_AGGR_MAXSIZE bytes; we won't
5385 * ever queue more than that in a single frame.
5386 */
5387 TAILQ_FOREACH(bf, &tid->tid_q, bf_list) {
5388 /*
5389 * TODO: I'm not sure if we're going to hit cases where
5390 * no frames get sent because the list is empty.
5391 */
5392
5393 /* Check if it's in the BAW */
5394 if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
5395 SEQNO(bf->bf_state.bfs_seqno)))) {
5396 break;
5397 }
5398
5399 /* Check if it's even supposed to be in the BAW */
5400 if (! bf->bf_state.bfs_dobaw) {
5401 break;
5402 }
5403
5404 nbytes += bf->bf_state.bfs_pktlen;
5405 if (nbytes >= ATH_AGGR_MAXSIZE)
5406 break;
5407
5408 /*
5409 * Check if we're likely going to leak a frame
5410 * as part of a PSPOLL. Break out at this point;
5411 * we're only going to send a single frame anyway.
5412 */
5413 if (an->an_leak_count) {
5414 break;
5415 }
5416 }
5417
5418 return MIN(nbytes, ATH_AGGR_MAXSIZE);
5419}
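/*
 * Illustrative example of the BAW check above: with tap->txa_start = 100
 * and tap->txa_wnd = 64, BAW_WITHIN() accepts sequence numbers 100..163
 * (modulo the 4096-entry sequence space), so a queued frame with
 * seqno 164 ends the byte count early even if more traffic is queued
 * behind it.
 */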
5420
5421/*
5422 * Schedule some packets from the given node/TID to the hardware.
5423 *
5424 * This is the aggregate version.
5425 */
5426void
5427ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5428 struct ath_tid *tid)
5429{
5430 struct ath_buf *bf;
5431 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5432 struct ieee80211_tx_ampdu *tap;
5433 ATH_AGGR_STATUS status;
5434 ath_bufhead bf_q;
5435 int swq_pktbytes;
5436
5437 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5438 ATH_TX_LOCK_ASSERT(sc);
5439
5440 /*
5441 * XXX TODO: If we're called for a queue that we're leaking frames to,
5442 * ensure we only leak one.
5443 */
5444
5445 tap = ath_tx_get_tx_tid(an, tid->tid);
5446
5447 if (tid->tid == IEEE80211_NONQOS_TID)
5448 DPRINTF(sc, ATH_DEBUG_SW_TX,
5449 "%s: called for TID=NONQOS_TID?\n", __func__);
5450
5451 for (;;) {
5452 status = ATH_AGGR_DONE;
5453
5454 /*
5455 * If the upper layer has paused the TID, don't
5456 * queue any further packets.
5457 *
5458 * This can also occur from the completion task because
5459 * of packet loss; but as it's serialised with this code,
5460 * it won't "appear" half way through queuing packets.
5461 */
5462 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5463 break;
5464
5465 bf = ATH_TID_FIRST(tid);
5466 if (bf == NULL) {
5467 break;
5468 }
5469
5470 /*
5471 * If the packet doesn't fall within the BAW (eg a NULL
5472 * data frame), schedule it directly; continue.
5473 */
5474 if (! bf->bf_state.bfs_dobaw) {
5475 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5476 "%s: non-baw packet\n",
5477 __func__);
5478 ATH_TID_REMOVE(tid, bf, bf_list);
5479
5480 if (bf->bf_state.bfs_nframes > 1)
5481 DPRINTF(sc, ATH_DEBUG_SW_TX,
5482 "%s: aggr=%d, nframes=%d\n",
5483 __func__,
5484 bf->bf_state.bfs_aggr,
5485 bf->bf_state.bfs_nframes);
5486
5487 /*
5488 * This shouldn't happen - such frames shouldn't
5489 * ever have been queued as an aggregate in the
5490 * first place. However, make sure the fields
5491 * are correctly setup just to be totally sure.
5492 */
5493 bf->bf_state.bfs_aggr = 0;
5494 bf->bf_state.bfs_nframes = 1;
5495
5496 /* Update CLRDMASK just before this frame is queued */
5497 ath_tx_update_clrdmask(sc, tid, bf);
5498
5499 ath_tx_do_ratelookup(sc, bf, tid->tid,
5500 bf->bf_state.bfs_pktlen, false);
5501 ath_tx_calc_duration(sc, bf);
5502 ath_tx_calc_protection(sc, bf);
5503 ath_tx_set_rtscts(sc, bf);
5504 ath_tx_rate_fill_rcflags(sc, bf);
5505 ath_tx_setds(sc, bf);
5506 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5507
5508 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5509
5510 /* Queue the packet; continue */
5511 goto queuepkt;
5512 }
5513
5514 TAILQ_INIT(&bf_q);
5515
5516 /*
5517 * Loop over the swq to find out how long
5518 * each packet is (up until 64k) and provide that
5519 * to the rate control lookup.
5520 */
5521 swq_pktbytes = ath_tx_tid_swq_depth_bytes(sc, an, tid);
5522 ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);
5523
5524 /*
5525 * Note this is only used for the fragment paths and
5526 * should really be rethought if we want to do
5527 * things like an RTS burst across >1 aggregate.
5528 */
5529 ath_tx_calc_duration(sc, bf);
5530 ath_tx_calc_protection(sc, bf);
5531
5532 ath_tx_set_rtscts(sc, bf);
5533 ath_tx_rate_fill_rcflags(sc, bf);
5534
5535 status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5536
5537 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5538 "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5539
5540 /*
5541 * No frames to be picked up - out of BAW
5542 */
5543 if (TAILQ_EMPTY(&bf_q))
5544 break;
5545
5546 /*
5547 * This assumes that the descriptor list in the ath_bufhead
5548 * are already linked together via bf_next pointers.
5549 */
5550 bf = TAILQ_FIRST(&bf_q);
5551
5552 if (status == ATH_AGGR_8K_LIMITED)
5553 sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5554
5555 /*
5556 * If it's the only frame send as non-aggregate
5557 * assume that ath_tx_form_aggr() has checked
5558 * whether it's in the BAW and added it appropriately.
5559 */
5560 if (bf->bf_state.bfs_nframes == 1) {
5561 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5562 "%s: single-frame aggregate\n", __func__);
5563
5564 /* Update CLRDMASK just before this frame is queued */
5565 ath_tx_update_clrdmask(sc, tid, bf);
5566
5567 bf->bf_state.bfs_aggr = 0;
5568 bf->bf_state.bfs_ndelim = 0;
5569 ath_tx_setds(sc, bf);
5570 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5571 if (status == ATH_AGGR_BAW_CLOSED)
5572 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5573 else
5574 sc->sc_aggr_stats.aggr_single_pkt++;
5575 } else {
5576 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5577 "%s: multi-frame aggregate: %d frames, "
5578 "length %d\n",
5579 __func__, bf->bf_state.bfs_nframes,
5580 bf->bf_state.bfs_al);
5581 bf->bf_state.bfs_aggr = 1;
5582 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5583 sc->sc_aggr_stats.aggr_aggr_pkt++;
5584
5585 /* Update CLRDMASK just before this frame is queued */
5586 ath_tx_update_clrdmask(sc, tid, bf);
5587
5588 /*
5589 * Calculate the duration/protection as required.
5590 */
5591 ath_tx_calc_duration(sc, bf);
5592 ath_tx_calc_protection(sc, bf);
5593
5594 /*
5595 * Update the rate and rtscts information based on the
5596 * rate decision made by the rate control code;
5597 * the first frame in the aggregate needs it.
5598 */
5599 ath_tx_set_rtscts(sc, bf);
5600
5601 /*
5602 * Setup the relevant descriptor fields
5603 * for aggregation. The first descriptor
5604 * already points to the rest in the chain.
5605 */
5606 ath_tx_setds_11n(sc, bf);
5607 }
5608 queuepkt:
5609 /* Set completion handler, multi-frame aggregate or not */
5610 bf->bf_comp = ath_tx_aggr_comp;
5611
5612 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5613 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5614
5615 /*
5616 * Update leak count and frame config if we're leaking frames.
5617 *
5618 * XXX TODO: it should update all frames in an aggregate
5619 * correctly!
5620 */
5621 ath_tx_leak_count_update(sc, tid, bf);
5622
5623 /* Punt to txq */
5624 ath_tx_handoff(sc, txq, bf);
5625
5626 /* Track outstanding buffer count to hardware */
5627 /* aggregates are "one" buffer */
5628 tid->hwq_depth++;
5629
5630 /*
5631 * Break out if ath_tx_form_aggr() indicated
5632 * there can't be any further progress (eg BAW is full.)
5633 * Checking for an empty txq is done above.
5634 *
5635 * XXX locking on txq here?
5636 */
5637 /* XXX TXQ locking */
5638 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5639 (status == ATH_AGGR_BAW_CLOSED ||
5640 status == ATH_AGGR_LEAK_CLOSED))
5641 break;
5642 }
5643}
5644
5645/*
5646 * Schedule some packets from the given node/TID to the hardware.
5647 *
5648 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5649 * It just dumps frames into the TXQ. We should limit how deep
5650 * the transmit queue can grow for frames dispatched to the given
5651 * TXQ.
5652 *
5653 * To avoid locking issues, either we need to own the TXQ lock
5654 * at this point, or we need to pass in the maximum frame count
5655 * from the caller.
5656 */
5657void
5658ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5659 struct ath_tid *tid)
5660{
5661 struct ath_buf *bf;
5662 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5663
5664 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5665 __func__, an, tid->tid);
5666
5667 ATH_TX_LOCK_ASSERT(sc);
5668
5669 /* Check - is AMPDU pending or running? then print out something */
5670 if (ath_tx_ampdu_pending(sc, an, tid->tid))
5671 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5672 __func__, tid->tid);
5673 if (ath_tx_ampdu_running(sc, an, tid->tid))
5674 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5675 __func__, tid->tid);
5676
5677 for (;;) {
5678 /*
5679 * If the upper layers have paused the TID, don't
5680 * queue any further packets.
5681 *
5682 * XXX if we are leaking frames, make sure we decrement
5683 * that counter _and_ we continue here.
5684 */
5685 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5686 break;
5687
5688 bf = ATH_TID_FIRST(tid);
5689 if (bf == NULL) {
5690 break;
5691 }
5692
5693 ATH_TID_REMOVE(tid, bf, bf_list);
5694
5695 /* Sanity check! */
5696 if (tid->tid != bf->bf_state.bfs_tid) {
5697 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5698 " tid %d\n", __func__, bf->bf_state.bfs_tid,
5699 tid->tid);
5700 }
5701 /* Normal completion handler */
5702 bf->bf_comp = ath_tx_normal_comp;
5703
5704 /*
5705 * Override this for now, until the non-aggregate
5706 * completion handler correctly handles software retransmits.
5707 */
5708 bf->bf_comp = ath_tx_normal_comp;
5709
5710 /* Update CLRDMASK just before this frame is queued */
5711 ath_tx_update_clrdmask(sc, tid, bf);
5712
5713 /* Program descriptors + rate control */
5714 ath_tx_do_ratelookup(sc, bf, tid->tid,
5715 bf->bf_state.bfs_pktlen, false);
5716 ath_tx_calc_duration(sc, bf);
5717 ath_tx_calc_protection(sc, bf);
5718 ath_tx_set_rtscts(sc, bf);
5719 ath_tx_rate_fill_rcflags(sc, bf);
5720 ath_tx_setds(sc, bf);
5721
5722 /*
5723 * Update the current leak count if
5724 * we're leaking frames; and set the
5725 * MORE flag as appropriate.
5726 */
5727 ath_tx_leak_count_update(sc, tid, bf);
5728
5729 /* Track outstanding buffer count to hardware */
5730 /* aggregates are "one" buffer */
5731 tid->hwq_depth++;
5732
5733 /* Punt to hardware or software txq */
5734 ath_tx_handoff(sc, txq, bf);
5735 }
5736}
5737
5738/*
5739 * Schedule some packets to the given hardware queue.
5740 *
5741 * This function walks the list of TIDs (ie, ath_node TIDs
5742 * with queued traffic) and attempts to schedule traffic
5743 * from them.
5744 *
5745 * TID scheduling is implemented as a FIFO, with TIDs being
5746 * added to the end of the queue after some frames have been
5747 * scheduled.
5748 */
5749void
5750ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5751{
5752 struct ath_tid *tid, *next, *last;
5753
5754 ATH_TX_LOCK_ASSERT(sc);
5755
5756 /*
5757 * For non-EDMA chips, aggr frames that have been built are
5758 * in axq_aggr_depth, whether they've been scheduled or not.
5759 * There's no FIFO, so txq->axq_depth is what's been scheduled
5760 * to the hardware.
5761 *
5762 * For EDMA chips, we do it in two stages. The existing code
5763 * builds a list of frames to go to the hardware and the EDMA
5764 * code turns it into a single entry to push into the FIFO.
5765 * That way we don't take up one packet per FIFO slot.
5766 * We do push one aggregate per FIFO slot though, just to keep
5767 * things simple.
5768 *
5769 * The FIFO depth is what's in the hardware; the txq->axq_depth
5770 * is what's been scheduled to the FIFO.
5771 *
5772 * fifo.axq_depth is the number of frames (or aggregates) pushed
5773 * into the EDMA FIFO. For multi-frame lists, this is the number
5774 * of frames pushed in.
5775 * axq_fifo_depth is the number of FIFO slots currently busy.
5776 */
5777
5778 /* For EDMA and non-EDMA, check built/scheduled against aggr limit */
5779 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
5780 sc->sc_aggr_stats.aggr_sched_nopkt++;
5781 return;
5782 }
5783
5784 /*
5785 * For non-EDMA chips, axq_depth is the "what's scheduled to
5786 * the hardware list". For EDMA it's "What's built for the hardware"
5787 * and fifo.axq_depth is how many frames have been dispatched
5788 * already to the hardware.
5789 */
5790 if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
5791 sc->sc_aggr_stats.aggr_sched_nopkt++;
5792 return;
5793 }
5794
5795 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5796
5797 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5798 /*
5799 * Suspend paused queues here; they'll be resumed
5800 * once the addba completes or times out.
5801 */
5802 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5803 __func__, tid->tid, tid->paused);
5804 ath_tx_tid_unsched(sc, tid);
5805 /*
5806 * This node may be in power-save and we're leaking
5807 * a frame; be careful.
5808 */
5809 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5810 goto loop_done;
5811 }
5812 if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5813 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5814 else
5815 ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5816
5817 /* Not empty? Re-schedule */
5818 if (tid->axq_depth != 0)
5819 ath_tx_tid_sched(sc, tid);
5820
5821 /*
5822 * Give the software queue time to aggregate more
5823 * packets. If we aren't running aggregation then
5824 * we should still limit the hardware queue depth.
5825 */
5826 /* XXX TXQ locking */
5827 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5828 break;
5829 }
5830 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5831 break;
5832 }
5833loop_done:
5834 /*
5835 * If this was the last entry on the original list, stop.
5836 * Otherwise nodes that have been rescheduled onto the end
5837 * of the TID FIFO list will just keep being rescheduled.
5838 *
5839 * XXX What should we do about nodes that were paused
5840 * but are pending a leaking frame in response to a ps-poll?
5841 * They'll be put at the front of the list; so they'll
5842 * prematurely trigger this condition! Ew.
5843 */
5844 if (tid == last)
5845 break;
5846 }
5847}
5848
5849/*
5850 * TX addba handling
5851 */
5852
5853/*
5854 * Return net80211 TID struct pointer, or NULL for none
5855 */
5856struct ieee80211_tx_ampdu *
5857ath_tx_get_tx_tid(struct ath_node *an, int tid)
5858{
5859 struct ieee80211_node *ni = &an->an_node;
5860 struct ieee80211_tx_ampdu *tap;
5861
5862 if (tid == IEEE80211_NONQOS_TID)
5863 return NULL;
5864
5865 tap = &ni->ni_tx_ampdu[tid];
5866 return tap;
5867}
5868
5869/*
5870 * Is AMPDU-TX running?
5871 */
5872static int
5873ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5874{
5875 struct ieee80211_tx_ampdu *tap;
5876
5877 if (tid == IEEE80211_NONQOS_TID)
5878 return 0;
5879
5880 tap = ath_tx_get_tx_tid(an, tid);
5881 if (tap == NULL)
5882 return 0; /* Not valid; default to not running */
5883
5884 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5885}
5886
5887/*
5888 * Is AMPDU-TX negotiation pending?
5889 */
5890static int
5891ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5892{
5893 struct ieee80211_tx_ampdu *tap;
5894
5895 if (tid == IEEE80211_NONQOS_TID)
5896 return 0;
5897
5898 tap = ath_tx_get_tx_tid(an, tid);
5899 if (tap == NULL)
5900 return 0; /* Not valid; default to not pending */
5901
5902 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5903}
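/*
 * These two predicates steer the per-TID scheduler; roughly,
 * ath_txq_sched() above does:
 *
 *	if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
 *		ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
 *	else
 *		ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
 *
 * whilst ath_tx_ampdu_pending() is used (for example, in the normal
 * queue path above) to flag traffic seen while the ADDBA exchange is
 * still outstanding.
 */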
5904
5905/*
5906 * Is AMPDU-TX pending for the given TID?
5907 */
5908
5909/*
5910 * Method to handle sending an ADDBA request.
5911 *
5912 * We tap this so the relevant flags can be set to pause the TID
5913 * whilst waiting for the response.
5914 *
5915 * XXX there's no timeout handler we can override?
5916 */
5917int
5918ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5919 int dialogtoken, int baparamset, int batimeout)
5920{
5921 struct ath_softc *sc = ni->ni_ic->ic_softc;
5922 int tid = tap->txa_tid;
5923 struct ath_node *an = ATH_NODE(ni);
5924 struct ath_tid *atid = &an->an_tid[tid];
5925
5926 /*
5927 * XXX danger Will Robinson!
5928 *
5929 * Although the taskqueue may be running and scheduling some more
5930 * packets, these should all be _before_ the addba sequence number.
5931 * However, net80211 will keep self-assigning sequence numbers
5932 * until addba has been negotiated.
5933 *
5934 * In the past, these packets would be "paused" (which still works
5935 * fine, as they're being scheduled to the driver in the same
5936 * serialised method which is calling the addba request routine)
5937 * and when the aggregation session begins, they'll be dequeued
5938 * as aggregate packets and added to the BAW. However, now there's
5939 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5940 * packets. Thus they never get included in the BAW tracking and
5941 * this can cause the initial burst of packets after the addba
5942 * negotiation to "hang", as they quickly fall outside the BAW.
5943 *
5944 * The "eventual" solution should be to tag these packets with
5945 * dobaw. Although net80211 has given us a sequence number,
5946 * it'll be "after" the left edge of the BAW and thus it'll
5947 * fall within it.
5948 */
5949 ATH_TX_LOCK(sc);
5950 /*
5951 * This is a bit annoying. Until net80211 HT code inherits some
5952 * (any) locking, we may have this called in parallel BUT only
5953 * one response/timeout will be called. Grr.
5954 */
5955 if (atid->addba_tx_pending == 0) {
5956 ath_tx_tid_pause(sc, atid);
5957 atid->addba_tx_pending = 1;
5958 }
5959 ATH_TX_UNLOCK(sc);
5960
5961 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5962 "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5963 __func__,
5964 ni->ni_macaddr,
5965 ":",
5966 dialogtoken, baparamset, batimeout);
5967 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5968 "%s: txa_start=%d, ni_txseqs=%d\n",
5969 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5970
5971 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5972 batimeout);
5973}
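To make the lifetime of that pause explicit, here is a hedged sketch (the function name is invented; this condenses ath_addba_request(), ath_addba_response() and ath_addba_response_timeout() rather than adding anything new) of how the request side and the response/timeout side keep ath_tx_tid_pause() and ath_tx_tid_resume() balanced:

/* Illustrative only; not driver code. */
static void
example_addba_pause_lifecycle(struct ath_softc *sc, struct ath_tid *atid)
{

	/* Request side: pause at most once while an exchange is outstanding. */
	ATH_TX_LOCK(sc);
	if (atid->addba_tx_pending == 0) {
		ath_tx_tid_pause(sc, atid);
		atid->addba_tx_pending = 1;
	}
	ATH_TX_UNLOCK(sc);

	/* Response or timeout side: clear the flag and resume exactly once. */
	ATH_TX_LOCK(sc);
	atid->addba_tx_pending = 0;
	ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
}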
5974
5975/*
5976 * Handle an ADDBA response.
5977 *
5978 * We unpause the queue so TX'ing can resume.
5979 *
5980 * Any packets TX'ed from this point should be "aggregate" (whether
5981 * aggregate or not) so the BAW is updated.
5982 *
5983 * Note! net80211 keeps self-assigning sequence numbers until
5984 * ampdu is negotiated. This means the initially-negotiated BAW left
5985 * edge won't match the ni->ni_txseq.
5986 *
5987 * So, being very dirty, the BAW left edge is "slid" here to match
5988 * ni->ni_txseq.
5989 *
5990 * What likely SHOULD happen is that all packets subsequent to the
5991 * addba request should be tagged as aggregate and queued as non-aggregate
5992 * frames; thus updating the BAW. For now though, I'll just slide the
5993 * window.
5994 */
5995int
5996ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5997 int status, int code, int batimeout)
5998{
5999 struct ath_softc *sc = ni->ni_ic->ic_softc;
6000 int tid = tap->txa_tid;
6001 struct ath_node *an = ATH_NODE(ni);
6002 struct ath_tid *atid = &an->an_tid[tid];
6003 int r;
6004
6005 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6006 "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
6007 ni->ni_macaddr,
6008 ":",
6009 status, code, batimeout);
6010
6011 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6012 "%s: txa_start=%d, ni_txseqs=%d\n",
6013 __func__, tap->txa_start, ni->ni_txseqs[tid]);
6014
6015 /*
6016 * Call this first, so the interface flags get updated
6017 * before the TID is unpaused. Otherwise a race condition
6018 * exists where the unpaused TID still doesn't yet have
6019 * IEEE80211_AGGR_RUNNING set.
6020 */
6021 r = sc->sc_addba_response(ni, tap, status, code, batimeout);
6022
6023 ATH_TX_LOCK(sc);
6024 atid->addba_tx_pending = 0;
6025 /*
6026 * XXX dirty!
6027 * Slide the BAW left edge to wherever net80211 left it for us.
6028 * Read above for more information.
6029 */
6030 tap->txa_start = ni->ni_txseqs[tid];
6031 ath_tx_tid_resume(sc, atid);
6032 ATH_TX_UNLOCK(sc);
6033 return r;
6034}
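A small illustration of why the left-edge slide matters, using the BAW_WITHIN() macro from if_ath_tx.h and assuming it checks that the modulo-4096 sequence distance from the left edge is less than the window size. The numbers and the function itself are invented for the example; it is not part of the driver.

/* Illustrative only: shows the stale-left-edge problem the slide avoids. */
static void
example_baw_slide(void)
{
	uint16_t negotiated_start = 10;	/* what the ADDBA exchange agreed on */
	uint16_t next_seqno = 200;	/* where net80211 self-assignment got to */

	/* Without the slide the very first post-ADDBA frame sits outside
	 * the (here 64-entry) window ... */
	if (! BAW_WITHIN(negotiated_start, 64, next_seqno))
		printf("frame would stall outside the BAW\n");

	/* ... with the slide (txa_start = ni_txseqs[tid]) it falls inside. */
	if (BAW_WITHIN(next_seqno, 64, next_seqno))
		printf("frame falls within the BAW\n");
}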
6035
6036/*
6037 * Stop ADDBA on a queue.
6038 *
6039 * This can be called whilst BAR TX is currently active on the queue,
6040 * so make sure this is unblocked before continuing.
6041 */
6042void
6043ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
6044{
6045 struct ath_softc *sc = ni->ni_ic->ic_softc;
6046 int tid = tap->txa_tid;
6047 struct ath_node *an = ATH_NODE(ni);
6048 struct ath_tid *atid = &an->an_tid[tid];
6049 ath_bufhead bf_cq;
6050 struct ath_buf *bf;
6051
6052 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
6053 __func__,
6054 ni->ni_macaddr,
6055 ":");
6056
6057 /*
6058 * Pause TID traffic early, so there aren't any races.
6059 * Unblock the pending BAR-held traffic, if it's currently paused.
6060 */
6061 ATH_TX_LOCK(sc);
6062 ath_tx_tid_pause(sc, atid);
6063 if (atid->bar_wait) {
6064 /*
6065 * bar_unsuspend() expects bar_tx == 1, as it should be
6066 * called from the TX completion path. This quietens
6067 * the warning. It's cleared for us anyway.
6068 */
6069 atid->bar_tx = 1;
6070 ath_tx_tid_bar_unsuspend(sc, atid);
6071 }
6072 ATH_TX_UNLOCK(sc);
6073
6074 /* There's no need to hold the TXQ lock here */
6075 sc->sc_addba_stop(ni, tap);
6076
6077 /*
6078 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
6079 * it'll set the cleanup flag, and it'll be unpaused once
6080 * things have been cleaned up.
6081 */
6082 TAILQ_INIT(&bf_cq);
6083 ATH_TX_LOCK(sc);
6084
6085 /*
6086 * In case there's a followup call to this, only call it
6087 * if we don't have a cleanup in progress.
6088 *
6089 * Since we've paused the queue above, we need to make
6090 * sure we unpause if there's already a cleanup in
6091 * progress - it means something else is also doing
6092 * this stuff, so we don't need to also keep it paused.
6093 */
6094 if (atid->cleanup_inprogress) {
6095 ath_tx_tid_resume(sc, atid);
6096 } else {
6097 ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
6098 /*
6099 * Unpause the TID if no cleanup is required.
6100 */
6101 if (! atid->cleanup_inprogress)
6102 ath_tx_tid_resume(sc, atid);
6103 }
6104 ATH_TX_UNLOCK(sc);
6105
6106 /* Handle completing frames and fail them */
6107 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
6108 TAILQ_REMOVE(&bf_cq, bf, bf_list);
6109 ath_tx_default_comp(sc, bf, 1);
6110 }
6111
6112}
6113
6114/*
6115 * Handle a node reassociation.
6116 *
6117 * We may have a bunch of frames queued to the hardware; those need
6118 * to be marked as cleanup.
6119 */
6120void
6121ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
6122{
6123 struct ath_tid *tid;
6124 int i;
6125 ath_bufhead bf_cq;
6126 struct ath_buf *bf;
6127
6128 TAILQ_INIT(&bf_cq);
6129
6130 ATH_TX_UNLOCK_ASSERT(sc);
6131
6132 ATH_TX_LOCK(sc);
6133 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
6134 tid = &an->an_tid[i];
6135 if (tid->hwq_depth == 0)
6136 continue;
6137 DPRINTF(sc, ATH_DEBUG_NODE,
6138 "%s: %6D: TID %d: cleaning up TID\n",
6139 __func__,
6140 an->an_node.ni_macaddr,
6141 ":",
6142 i);
6143 /*
6144 * In case there's a followup call to this, only call it
6145 * if we don't have a cleanup in progress.
6146 */
6147 if (! tid->cleanup_inprogress) {
6148 ath_tx_tid_pause(sc, tid);
6149 ath_tx_tid_cleanup(sc, an, i, &bf_cq);
6150 /*
6151 * Unpause the TID if no cleanup is required.
6152 */
6153 if (! tid->cleanup_inprogress)
6154 ath_tx_tid_resume(sc, tid);
6155 }
6156 }
6157 ATH_TX_UNLOCK(sc);
6158
6159 /* Handle completing frames and fail them */
6160 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
6161 TAILQ_REMOVE(&bf_cq, bf, bf_list);
6162 ath_tx_default_comp(sc, bf, 1);
6163 }
6164}
6165
6166/*
6167 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
6168 * it simply tears down the aggregation session. Ew.
6169 *
6170 * It however will call ieee80211_ampdu_stop() which will call
6171 * ic->ic_addba_stop().
6172 *
6173 * XXX This uses a hard-coded max BAR count value; the whole
6174 * XXX BAR TX success or failure should be better handled!
6175 */
6176void
6177ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
6178 int status)
6179{
6180 struct ath_softc *sc = ni->ni_ic->ic_softc;
6181 int tid = tap->txa_tid;
6182 struct ath_node *an = ATH_NODE(ni);
6183 struct ath_tid *atid = &an->an_tid[tid];
6184 int attempts = tap->txa_attempts;
6185 int old_txa_start;
6186
6187 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6188 "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
6189 __func__,
6190 ni->ni_macaddr,
6191 ":",
6192 tap->txa_tid,
6193 atid->tid,
6194 status,
6195 attempts,
6196 tap->txa_start,
6197 tap->txa_seqpending);
6198
6199 /* Note: This may update the BAW details */
6200 /*
6201 * XXX What if this does slide the BAW along? We need to somehow
6202 * XXX either fix things when it does happen, or prevent the
6203 * XXX seqpending value from being anything other than exactly what
6204 * XXX the hell we want!
6205 *
6206 * XXX So for now, how about doing this inside the TX lock
6207 * XXX and just correcting it afterwards? The below condition should
6208 * XXX never happen, and if it does I need to fix all kinds of things.
6209 */
6210 ATH_TX_LOCK(sc);
6211 old_txa_start = tap->txa_start;
6212 sc->sc_bar_response(ni, tap, status);
6213 if (tap->txa_start != old_txa_start) {
6214 device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6215 __func__,
6216 tid,
6217 tap->txa_start,
6218 old_txa_start);
6219 }
6220 tap->txa_start = old_txa_start;
6221 ATH_TX_UNLOCK(sc);
6222
6223 /* Unpause the TID */
6224 /*
6225 * XXX if this is attempt=50, the TID will be downgraded
6226 * XXX to a non-aggregate session. So we must unpause the
6227 * XXX TID here or it'll never be done.
6228 *
6229 * Also, don't call it if bar_tx/bar_wait are 0; something
6230 * has beaten us to the punch? (XXX figure out what?)
6231 */
6232 if (status == 0 || attempts == 50) {
6233 ATH_TX_LOCK(sc);
6234 if (atid->bar_tx == 0 || atid->bar_wait == 0)
6235 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6236 "%s: huh? bar_tx=%d, bar_wait=%d\n",
6237 __func__,
6238 atid->bar_tx, atid->bar_wait);
6239 else
6240 ath_tx_tid_bar_unsuspend(sc, atid);
6241 ATH_TX_UNLOCK(sc);
6242 }
6243}
6244
6245/*
6246 * This is called whenever the pending ADDBA request times out.
6247 * Unpause and reschedule the TID.
6248 */
6249void
6250ath_addba_response_timeout(struct ieee80211_node *ni,
6251 struct ieee80211_tx_ampdu *tap)
6252{
6253 struct ath_softc *sc = ni->ni_ic->ic_softc;
6254 int tid = tap->txa_tid;
6255 struct ath_node *an = ATH_NODE(ni);
6256 struct ath_tid *atid = &an->an_tid[tid];
6257
6258 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6259 "%s: %6D: TID=%d, called; resuming\n",
6260 __func__,
6261 ni->ni_macaddr,
6262 ":",
6263 tid);
6264
6265 ATH_TX_LOCK(sc);
6266 atid->addba_tx_pending = 0;
6267 ATH_TX_UNLOCK(sc);
6268
6269 /* Note: This updates the aggregate state to (again) pending */
6270 sc->sc_addba_response_timeout(ni, tap);
6271
6272 /* Unpause the TID; which reschedules it */
6273 ATH_TX_LOCK(sc);
6274 ath_tx_tid_resume(sc, atid);
6275 ATH_TX_UNLOCK(sc);
6276}
6277
6278/*
6279 * Check if a node is asleep or not.
6280 */
6281int
6282ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
6283{
6284
6285 ATH_TX_LOCK_ASSERT(sc);
6286
6287 return (an->an_is_powersave);
6288}
6289
6290/*
6291 * Mark a node as currently "in powersaving."
6292 * This suspends all traffic on the node.
6293 *
6294 * This must be called with the node/tx locks free.
6295 *
6296 * XXX TODO: the locking silliness below is due to how the node
6297 * locking currently works. Right now, the node lock is grabbed
6298 * to do rate control lookups and these are done with the TX
6299 * queue lock held. This means the node lock can't be grabbed
6300 * first here or a LOR will occur.
6301 *
6302 * Eventually (hopefully!) the TX path code will only grab
6303 * the TXQ lock when transmitting and the ath_node lock when
6304 * doing node/TID operations. There are other complications -
6305 * the sched/unsched operations involve walking the per-txq
6306 * 'active tid' list and this requires both locks to be held.
6307 */
6308void
6309ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
6310{
6311 struct ath_tid *atid;
6312 struct ath_txq *txq;
6313 int tid;
6314
6315 ATH_TX_UNLOCK_ASSERT(sc);
6316
6317 /* Suspend all traffic on the node */
6318 ATH_TX_LOCK(sc);
6319
6320 if (an->an_is_powersave) {
6321 DPRINTF(sc, ATH_DEBUG_XMIT,
6322 "%s: %6D: node was already asleep!\n",
6323 __func__, an->an_node.ni_macaddr, ":");
6324 ATH_TX_UNLOCK(sc);
6325 return;
6326 }
6327
6328 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6329 atid = &an->an_tid[tid];
6330 txq = sc->sc_ac2q[atid->ac];
6331
6332 ath_tx_tid_pause(sc, atid);
6333 }
6334
6335 /* Mark node as in powersaving */
6336 an->an_is_powersave = 1;
6337
6338 ATH_TX_UNLOCK(sc);
6339}
6340
6341/*
6342 * Mark a node as currently "awake."
6343 * This resumes all traffic to the node.
6344 */
6345void
6346ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
6347{
6348 struct ath_tid *atid;
6349 struct ath_txq *txq;
6350 int tid;
6351
6352 ATH_TX_UNLOCK_ASSERT(sc);
6353
6354 ATH_TX_LOCK(sc);
6355
6356 /* !? */
6357 if (an->an_is_powersave == 0) {
6358 ATH_TX_UNLOCK(sc);
6359 DPRINTF(sc, ATH_DEBUG_XMIT,
6360 "%s: an=%p: node was already awake\n",
6361 __func__, an);
6362 return;
6363 }
6364
6365 /* Mark node as awake */
6366 an->an_is_powersave = 0;
6367 /*
6368 * Clear any pending leaked frame requests
6369 */
6370 an->an_leak_count = 0;
6371
6372 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6373 atid = &an->an_tid[tid];
6374 txq = sc->sc_ac2q[atid->ac];
6375
6376 ath_tx_tid_resume(sc, atid);
6377 }
6378 ATH_TX_UNLOCK(sc);
6379}
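These two helpers are driven from the driver's node power-save notification. A minimal sketch of such a hook follows; the wrapper name is invented for illustration, and the in-tree hook (in if_ath.c) does additional work such as PS-queue and TIM handling.

/* Illustrative only; not the in-tree power-save hook. */
static void
example_node_powersave(struct ieee80211_node *ni, int enable)
{
	struct ath_softc *sc = ni->ni_ic->ic_softc;
	struct ath_node *an = ATH_NODE(ni);

	if (enable)
		ath_tx_node_sleep(sc, an);	/* pause every TID's software queue */
	else
		ath_tx_node_wakeup(sc, an);	/* resume and reschedule them */
}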
6380
6381static int
6382ath_legacy_dma_txsetup(struct ath_softc *sc)
6383{
6384
6385 /* nothing new needed */
6386 return (0);
6387}
6388
6389static int
6390ath_legacy_dma_txteardown(struct ath_softc *sc)
6391{
6392
6393 /* nothing new needed */
6394 return (0);
6395}
6396
6397void
6398ath_xmit_setup_legacy(struct ath_softc *sc)
6399{
6400 /*
6401 * For now, just set the descriptor length to sizeof(ath_desc);
6402 * worry about extracting the real length out of the HAL later.
6403 */
6404 sc->sc_tx_desclen = sizeof(struct ath_desc);
6405 sc->sc_tx_statuslen = sizeof(struct ath_desc);
6406 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */
6407
6408 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6409 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6410 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6411
6412 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6413 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6414
6415 sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
6416}
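The pointers filled in above are how the rest of the driver stays agnostic of legacy versus EDMA TX DMA: generic code calls through sc->sc_tx instead of the ath_legacy_* functions directly. A hedged sketch of that call-through pattern (the wrapper name is invented for illustration):

/* Illustrative only; shows the method-table dispatch, not in-tree code. */
static int
example_tx_attach(struct ath_softc *sc)
{
	ath_xmit_setup_legacy(sc);		/* or the EDMA equivalent */
	return (sc->sc_tx.xmit_setup(sc));	/* dispatch via the method table */
}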