/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/ath/if_ath_tx.c 262007 2014-02-17 01:36:53Z kevlo $");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef	ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef	ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
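		/*
		 * Each TX descriptor carries sc_tx_nmaps buffer pointers,
		 * so ceil(bf_nseg / sc_tx_nmaps) descriptors were written;
		 * that's what the expression below computes.
		 */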
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif /* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
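	/*
	 * XXX these ah_magic values appear to correspond to the AR5416
	 * and AR9300 HAL descriptor families respectively; keep them in
	 * sync with the HAL attach code.
	 */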
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
		    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}

static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries ++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QoS TID frames into the ATH_NONQOS_TID_AC queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);
	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

/*
 * Reclaim mbuf resources.  For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}

static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
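		/*
		 * Faking an over-limit segment count here steers us into
		 * the m_collapse() path below, which retries the load
		 * with a defragmented mbuf chain.
		 */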
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
    struct ath_buf *bf, int is_aggr, int is_first_subframe,
    int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
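	/*
	 * For example, with bf_nseg = 6 and numTxMaps = 4 (EDMA), the
	 * loop below writes two descriptors: the first carries segments
	 * 0..3 and the second carries segments 4..5.
	 */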
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point.  It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id.  These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
			, bufAddrList
			, segLenList
			, bf->bf_descid		/* XXX desc id */
			, bf->bf_state.bfs_tx_queue
			, isFirstDesc		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames; as it'll be called on all
		 * sub-frames.  Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}

/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips. These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it. The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
			, rc[1].ratecode, rc[1].tries
			, rc[2].ratecode, rc[2].tries
			, rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already have been linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11n_aggr_middle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
			, bf->bf_state.bfs_pktlen	/* packet length */
			, bf->bf_state.bfs_hdrlen	/* header length */
			, bf->bf_state.bfs_atype	/* Atheros packet type */
			, bf->bf_state.bfs_txpower	/* txpower */
			, bf->bf_state.bfs_txrate0
			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
			, bf->bf_state.bfs_keyix	/* key cache index */
			, bf->bf_state.bfs_txantenna	/* antenna mode */
			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		    );

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true.  For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info.  But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor?  Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion.  That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
	 */
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf_first;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	     ("ath_tx_handoff_hw called for mcast queue"));

	/*
	 * XXX racy, should hold the PCU lock when checking this,
	 * and also should ensure that the TX counter is >0!
	 */
	KASSERT((sc->sc_inreset_cnt == 0),
	    ("%s: TX during reset?\n", __func__));

#if 0
	/*
	 * This causes a LOR. Find out where the PCU lock is being
	 * held whilst the TXQ lock is grabbed - that shouldn't
	 * be occurring.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		ATH_PCU_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: called with sc_in_reset != 0\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		/* XXX axq_link needs to be set and updated! */
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		return;
		}
	ATH_PCU_UNLOCK(sc);
#endif

	ATH_TXQ_LOCK(txq);

	/*
	 * XXX TODO: if there's a holdingbf, then
	 * ATH_TXQ_PUTRUNNING should be clear.
	 *
	 * If there is a holdingbf and the list is empty,
	 * then axq_link should be pointing to the holdingbf.
	 *
	 * Otherwise it should point to the last descriptor
	 * in the last ath_buf.
	 *
	 * In any case, we should really ensure that we
	 * update the previous descriptor link pointer to
	 * this descriptor, regardless of all of the above state.
	 *
	 * For now this is captured by having axq_link point
	 * to either the holdingbf (if the TXQ list is empty)
	 * or the end of the list (if the TXQ list isn't empty.)
	 * I'd rather just kill axq_link here and do it as above.
	 */

	/*
	 * Append the frame to the TX queue.
	 */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_KTR(sc, ATH_KTR_TX, 3,
	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
	    "depth=%d",
	    txq->axq_qnum,
	    bf,
	    txq->axq_depth);

	/*
	 * If there's a link pointer, update it.
	 *
	 * XXX we should replace this with the above logic, just
	 * to kill axq_link with fire.
	 */
	if (txq->axq_link != NULL) {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
		    "lastds=%d",
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    bf->bf_lastds);
	}

	/*
	 * If we've not pushed anything into the hardware yet,
	 * push the head of the queue into the TxDP.
	 *
	 * Once we've started DMA, there's no guarantee that
	 * updating the TxDP with a new value will actually work.
	 * So we just don't do that - if we hit the end of the list,
	 * we keep that buffer around (the "holding buffer") and
	 * re-start DMA by updating the link pointer of _that_
	 * descriptor and then restart DMA.
	 */
	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
		bf_first = TAILQ_FIRST(&txq->axq_q);
		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
		    "lastds=%p depth %d",
		    txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    bf_first->bf_lastds,
		    txq->axq_depth);
	}

	/*
	 * Ensure that the bf TXQ matches this TXQ, so later
	 * checking and holding buffer manipulation is sane.
	 */
	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	/*
	 * Track aggregate queue depth.
	 */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/*
	 * Update the link pointer.
	 */
	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);

	/*
	 * Start DMA.
	 *
	 * If we wrote a TxDP above, DMA will start from here.
	 *
	 * If DMA is running, it'll do nothing.
	 *
	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
	 * or VEOL) then it stops at the last transmitted write.
	 * We then append a new frame by updating the link pointer
	 * in that descriptor and then kick TxE here; it will re-read
	 * that last descriptor and find the new descriptor to transmit.
	 *
	 * This is why we keep the holding descriptor around.
	 */
	ath_hal_txstart(ah, txq->axq_qnum);
	ATH_TXQ_UNLOCK(txq);
	ATH_KTR(sc, ATH_KTR_TX, 1,
	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
	    __func__,
	    txq->axq_qnum,
	    bf,
	    bf_last,
	    (uint32_t) bf->bf_daddr);

#ifdef	ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET)
		ath_tx_dump(sc, txq);
#endif

	/*
	 * This is called from a restart, so DMA is known to be
	 * completely stopped.
	 */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1\n",
	    __func__,
	    txq->axq_qnum));

	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;

	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef	ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}

static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS.  But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
	bf->bf_state.bfs_txflags = flags;
}

/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
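		/*
		 * The base duration is the SIFS + ACK time at the chosen
		 * rate; for a non-final fragment (MORE_FRAG set) it also
		 * covers a second SIFS + ACK plus the next fragment's
		 * TX time, computed below.
		 */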
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly.  The last fragment uses only
			 * the ACK duration
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_nextfraglen,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table.  We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}

/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

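	/*
	 * In short: ctsduration = [SIFS + CTS] (RTS exchanges only)
	 * + TX time of the frame itself + [SIFS + ACK] (only if an
	 * ACK is expected), which is what both branches below compute.
	 */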
	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		printf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame.  We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 *     use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
			rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
			rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}

/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate. Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
	}
}

/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct.  Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
		, bf->bf_state.bfs_pktlen	/* packet length */
		, bf->bf_state.bfs_hdrlen	/* header length */
		, bf->bf_state.bfs_atype	/* Atheros packet type */
		, bf->bf_state.bfs_txpower	/* txpower */
		, bf->bf_state.bfs_txrate0
		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
		, bf->bf_state.bfs_keyix	/* key cache index */
		, bf->bf_state.bfs_txantenna	/* antenna mode */
		, bf->bf_state.bfs_txflags	/* flags */
		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets).
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;	/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

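	/*
	 * XXX a note on CLRDMASK: setting HAL_TXDESC_CLRDMASK clears
	 * the hardware TX filter for this destination (eg after a
	 * filtered frame), so it only needs to be set on the first
	 * frame sent afterwards - hence the one-shot an->clrdmask flag.
	 */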
	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}

/*
 * Return whether this frame should be software queued or
 * direct dispatched.
 *
 * When doing powersave, BAR frames should be queued but other management
 * frames should be directly sent.
 *
 * When not doing powersave, stick BAR frames into the hardware queue
 * so it goes out even though the queue is paused.
 *
 * For now, management frames are also software queued by default.
 */
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	(*queue_to_head) = 0;

	/* If it's not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/* BAR TX whilst asleep; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ni->ni_macaddr, ":", type, subtype);
		return (0);
	} else {
		return (1);
	}
}


/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal". The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames.)
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both sequence numbers and the CCMP PN handling
	 * are "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PN's, resulting
	 * in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
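	/*
	 * (hdrlen & 3) is the number of pad bytes inserted after the
	 * 802.11 header to align the payload; they aren't transmitted
	 * over the air and so don't count towards the packet length.
	 */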
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/* Handle encryption twiddling if needed */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;				/* default no multi-rate retry*/
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;		/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16.  TID 16 maps to the ATH_NONQOS_TID_AC queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future.  There are
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue.  That shows up
	 *   as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
1778	 * way to ensure timely replenishing of the supply needed
1779	 * for sending frames.  Deferring interrupts reduces system
1780	 * load and potentially allows more concurrent work to be
1781	 * done, but if done too aggressively it can cause senders
1782	 * to back up.
1783	 *
1784	 * NB: use >= to deal with sc_txintrperiod changing
1785	 *     dynamically through sysctl.
1786	 */
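	/*
	 * A quick sketch of the logic below (the period value here is
	 * hypothetical): with sc_txintrperiod == 5, every fifth
	 * descriptor that doesn't already carry INTREQ gets the flag
	 * forced on, and the per-queue counter resets either way.
	 */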
1787	if (flags & HAL_TXDESC_INTREQ) {
1788		txq->axq_intrcnt = 0;
1789	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1790		flags |= HAL_TXDESC_INTREQ;
1791		txq->axq_intrcnt = 0;
1792	}
1793
1794	/* This point forward is actual TX bits */
1795
1796	/*
1797	 * At this point we are committed to sending the frame
1798	 * and we don't need to look at m_nextpkt; clear it in
1799	 * case this frame is part of a frag chain.
1800	 */
1801	m0->m_nextpkt = NULL;
1802
1803	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1804		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1805		    sc->sc_hwmap[rix].ieeerate, -1);
1806
1807	if (ieee80211_radiotap_active_vap(vap)) {
1808		u_int64_t tsf = ath_hal_gettsf64(ah);
1809
1810		sc->sc_tx_th.wt_tsf = htole64(tsf);
1811		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1812		if (iswep)
1813			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1814		if (isfrag)
1815			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1816		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1817		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1818		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1819
1820		ieee80211_radiotap_tx(vap, m0);
1821	}
1822
1823	/* Blank the legacy rate array */
1824	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1825
1826	/*
1827	 * ath_buf_set_rate needs at least one rate/try to setup
1828	 * the rate scenario.
1829	 */
1830	bf->bf_state.bfs_rc[0].rix = rix;
1831	bf->bf_state.bfs_rc[0].tries = try0;
1832	bf->bf_state.bfs_rc[0].ratecode = txrate;
1833
1834	/* Store the decided rate index values away */
1835	bf->bf_state.bfs_pktlen = pktlen;
1836	bf->bf_state.bfs_hdrlen = hdrlen;
1837	bf->bf_state.bfs_atype = atype;
1838	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1839	bf->bf_state.bfs_txrate0 = txrate;
1840	bf->bf_state.bfs_try0 = try0;
1841	bf->bf_state.bfs_keyix = keyix;
1842	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1843	bf->bf_state.bfs_txflags = flags;
1844	bf->bf_state.bfs_shpream = shortPreamble;
1845
1846	/* XXX this should be done in ath_tx_setrate() */
1847	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
1848	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
1849	bf->bf_state.bfs_ctsduration = 0;
1850	bf->bf_state.bfs_ismrr = ismrr;
1851
1852	return 0;
1853}
1854
1855/*
1856 * Queue a frame to the hardware or software queue.
1857 *
1858 * This can be called by the net80211 code.
1859 *
1860 * XXX what about locking? Or, push the seqno assign into the
1861 * XXX aggregate scheduler so it's serialised?
1862 *
1863 * XXX When sending management frames via ath_raw_xmit(),
1864 *     should CLRDMASK be set unconditionally?
1865 */
1866int
1867ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1868    struct ath_buf *bf, struct mbuf *m0)
1869{
1870	struct ieee80211vap *vap = ni->ni_vap;
1871	struct ath_vap *avp = ATH_VAP(vap);
1872	int r = 0;
1873	u_int pri;
1874	int tid;
1875	struct ath_txq *txq;
1876	int ismcast;
1877	const struct ieee80211_frame *wh;
1878	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1879	ieee80211_seq seqno;
1880	uint8_t type, subtype;
1881	int queue_to_head;
1882
1883	ATH_TX_LOCK_ASSERT(sc);
1884
1885	/*
1886	 * Determine the target hardware queue.
1887	 *
1888	 * For multicast frames, the txq gets overridden appropriately
1889	 * depending upon the state of PS.
1890	 *
1891	 * For any other frame, we do a TID/QoS lookup inside the frame
1892	 * to see what the TID should be. If it's a non-QoS frame, the
1893	 * AC and TID are overridden. The TID/TXQ code assumes the
1894	 * TID is on a predictable hardware TXQ, so we don't support
1895	 * having a node TID queued to multiple hardware TXQs.
1896	 * This may change in the future but would require some locking
1897	 * fudgery.
1898	 */
1899	pri = ath_tx_getac(sc, m0);
1900	tid = ath_tx_gettid(sc, m0);
1901
1902	txq = sc->sc_ac2q[pri];
1903	wh = mtod(m0, struct ieee80211_frame *);
1904	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1905	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1906	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1907
1908	/*
1909	 * Enforce how deep the multicast queue can grow.
1910	 *
1911	 * XXX duplicated in ath_raw_xmit().
1912	 */
1913	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1914		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1915		    > sc->sc_txq_mcastq_maxdepth) {
1916			sc->sc_stats.ast_tx_mcastq_overflow++;
1917			m_freem(m0);
1918			return (ENOBUFS);
1919		}
1920	}
1921
1922	/*
1923	 * Enforce how deep the unicast queue can grow.
1924	 *
1925	 * If the node is in power save then we don't want
1926	 * the software queue to grow too deep, or a node may
1927	 * end up consuming all of the ath_buf entries.
1928	 *
1929	 * For now, only do this for DATA frames.
1930	 *
1931	 * We will want to cap how many management/control
1932	 * frames get punted to the software queue so it doesn't
1933	 * fill up.  But the correct solution isn't yet obvious.
1934	 * In any case, this check should at least let frames pass
1935	 * that we are direct-dispatching.
1936	 *
1937	 * XXX TODO: duplicate this to the raw xmit path!
1938	 */
1939	if (type == IEEE80211_FC0_TYPE_DATA &&
1940	    ATH_NODE(ni)->an_is_powersave &&
1941	    ATH_NODE(ni)->an_swq_depth >
1942	     sc->sc_txq_node_psq_maxdepth) {
1943		sc->sc_stats.ast_tx_node_psq_overflow++;
1944		m_freem(m0);
1945		return (ENOBUFS);
1946	}
1947
1948	/* A-MPDU TX */
1949	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1950	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1951	is_ampdu = is_ampdu_tx | is_ampdu_pending;
1952
1953	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1954	    __func__, tid, pri, is_ampdu);
1955
1956	/* Set local packet state, used to queue packets to hardware */
1957	bf->bf_state.bfs_tid = tid;
1958	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1959	bf->bf_state.bfs_pri = pri;
1960
1961#if 1
1962	/*
1963	 * When servicing one or more stations in power-save mode
1964	 * or if there is some mcast data waiting on the mcast
1965	 * queue (to prevent out-of-order delivery), multicast frames
1966	 * must be buffered until after the beacon.
1967	 *
1968	 * TODO: we should lock the mcastq before we check the length.
1969	 */
1970	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
1971		txq = &avp->av_mcastq;
1972		/*
1973		 * Mark the frame as eventually belonging on the CAB
1974		 * queue, so the descriptor setup functions will
1975		 * correctly initialise the descriptor 'qcuId' field.
1976		 */
1977		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
1978	}
1979#endif
1980
1981	/* Do the generic frame setup */
1982	/* XXX should just bzero the bf_state? */
1983	bf->bf_state.bfs_dobaw = 0;
1984
1985	/* A-MPDU TX? Manually set sequence number */
1986	/*
1987	 * Don't do it whilst pending; the net80211 layer still
1988	 * assigns them.
1989	 */
1990	if (is_ampdu_tx) {
1991		/*
1992		 * Always call; this function will
1993		 * handle making sure that null data frames
1994		 * don't get a sequence number from the current
1995		 * TID and thus mess with the BAW.
1996		 */
1997		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
1998
1999		/*
2000		 * Don't add QoS NULL frames to the BAW.
2001		 */
2002		if (IEEE80211_QOS_HAS_SEQ(wh) &&
2003		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2004			bf->bf_state.bfs_dobaw = 1;
2005		}
2006	}
2007
2008	/*
2009	 * If needed, the sequence number has been assigned.
2010	 * Squirrel it away somewhere easy to get to.
2011	 */
2012	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
2013
2014	/* Is ampdu pending? fetch the seqno and print it out */
2015	if (is_ampdu_pending)
2016		DPRINTF(sc, ATH_DEBUG_SW_TX,
2017		    "%s: tid %d: ampdu pending, seqno %d\n",
2018		    __func__, tid, M_SEQNO_GET(m0));
2019
2020	/* This also sets up the DMA map */
2021	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2022
2023	if (r != 0)
2024		goto done;
2025
2026	/* At this point m0 could have changed! */
2027	m0 = bf->bf_m;
2028
2029#if 1
2030	/*
2031	 * If it's a multicast frame, do a direct-dispatch to the
2032	 * destination hardware queue. Don't bother software
2033	 * queuing it.
2034	 */
2035	/*
2036	 * If it's a BAR frame, do a direct dispatch to the
2037	 * destination hardware queue. Don't bother software
2038	 * queuing it, as the TID will now be paused.
2039	 * Sending a BAR frame can occur from the net80211 txa timer
2040	 * (ie, retries) or from the ath txtask (completion call.)
2041	 * It queues directly to hardware because the TID is paused
2042	 * at this point (and won't be unpaused until the BAR has
2043	 * either been TXed successfully or max retries has been
2044	 * reached.)
2045	 */
2046	/*
2047	 * Until things are better debugged - if this node is asleep
2048	 * and we're sending it a non-BAR frame, direct dispatch it.
2049	 * Why? Because we need to figure out what's actually being
2050	 * sent - eg, during reassociation/reauthentication after
2051	 * the node (last) disappeared whilst asleep, the driver should
2052	 * have unpaused/unsleep'ed the node.  So until that is
2053	 * sorted out, use this workaround.
2054	 */
2055	if (txq == &avp->av_mcastq) {
2056		DPRINTF(sc, ATH_DEBUG_SW_TX,
2057		    "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2058		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2059		ath_tx_xmit_normal(sc, txq, bf);
2060	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2061	    &queue_to_head)) {
2062		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2063	} else {
2064		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2065		ath_tx_xmit_normal(sc, txq, bf);
2066	}
2067#else
2068	/*
2069	 * For now, since there's no software queue,
2070	 * direct-dispatch to the hardware.
2071	 */
2072	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2073	/*
2074	 * Update the current leak count if
2075	 * we're leaking frames; and set the
2076	 * MORE flag as appropriate.
2077	 */
2078	ath_tx_leak_count_update(sc, tid, bf);
2079	ath_tx_xmit_normal(sc, txq, bf);
2080#endif
2081done:
2082	return 0;
2083}
2084
2085static int
2086ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2087	struct ath_buf *bf, struct mbuf *m0,
2088	const struct ieee80211_bpf_params *params)
2089{
2090	struct ifnet *ifp = sc->sc_ifp;
2091	struct ieee80211com *ic = ifp->if_l2com;
2092	struct ath_hal *ah = sc->sc_ah;
2093	struct ieee80211vap *vap = ni->ni_vap;
2094	int error, ismcast, ismrr;
2095	int keyix, hdrlen, pktlen, try0, txantenna;
2096	u_int8_t rix, txrate;
2097	struct ieee80211_frame *wh;
2098	u_int flags;
2099	HAL_PKT_TYPE atype;
2100	const HAL_RATE_TABLE *rt;
2101	struct ath_desc *ds;
2102	u_int pri;
2103	int o_tid = -1;
2104	int do_override;
2105	uint8_t type, subtype;
2106	int queue_to_head;
2107
2108	ATH_TX_LOCK_ASSERT(sc);
2109
2110	wh = mtod(m0, struct ieee80211_frame *);
2111	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2112	hdrlen = ieee80211_anyhdrsize(wh);
2113	/*
2114	 * Packet length must not include any
2115	 * pad bytes; deduct them here.
2116	 */
2117	/* XXX honor IEEE80211_BPF_DATAPAD */
2118	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
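	/*
	 * Worked example (assuming the mbuf carries the 32-bit aligned
	 * header padding): a 26-byte QoS header is padded to 28 bytes,
	 * so (hdrlen & 3) == 2 pad bytes are deducted and the 4-byte
	 * FCS is added back to get the over-the-air length.
	 */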
2119
2120	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2121	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2122
2123	ATH_KTR(sc, ATH_KTR_TX, 2,
2124	     "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2125
2126	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2127	    __func__, ismcast);
2128
2129	pri = params->ibp_pri & 3;
2130	/* Override pri if the frame isn't a QoS one */
2131	if (! IEEE80211_QOS_HAS_SEQ(wh))
2132		pri = ath_tx_getac(sc, m0);
2133
2134	/* XXX If it's an ADDBA, override the correct queue */
2135	do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2136
2137	/* Map ADDBA to the correct priority */
2138	if (do_override) {
2139#if 0
2140		DPRINTF(sc, ATH_DEBUG_XMIT,
2141		    "%s: overriding tid %d pri %d -> %d\n",
2142		    __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2143#endif
2144		pri = TID_TO_WME_AC(o_tid);
2145	}
2146
2147	/* Handle encryption twiddling if needed */
2148	if (! ath_tx_tag_crypto(sc, ni,
2149	    m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2150	    &hdrlen, &pktlen, &keyix)) {
2151		ath_freetx(m0);
2152		return EIO;
2153	}
2154	/* packet header may have moved, reset our local pointer */
2155	wh = mtod(m0, struct ieee80211_frame *);
2156
2157	/* Do the generic frame setup */
2158	/* XXX should just bzero the bf_state? */
2159	bf->bf_state.bfs_dobaw = 0;
2160
2161	error = ath_tx_dmasetup(sc, bf, m0);
2162	if (error != 0)
2163		return error;
2164	m0 = bf->bf_m;				/* NB: may have changed */
2165	wh = mtod(m0, struct ieee80211_frame *);
2166	bf->bf_node = ni;			/* NB: held reference */
2167
2168	/* Always enable CLRDMASK for raw frames for now.. */
2169	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
2170	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
2171	if (params->ibp_flags & IEEE80211_BPF_RTS)
2172		flags |= HAL_TXDESC_RTSENA;
2173	else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2174		/* XXX assume 11g/11n protection? */
2175		bf->bf_state.bfs_doprot = 1;
2176		flags |= HAL_TXDESC_CTSENA;
2177	}
2178	/* XXX leave ismcast to injector? */
2179	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2180		flags |= HAL_TXDESC_NOACK;
2181
2182	rt = sc->sc_currates;
2183	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2184	rix = ath_tx_findrix(sc, params->ibp_rate0);
2185	txrate = rt->info[rix].rateCode;
2186	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2187		txrate |= rt->info[rix].shortPreamble;
2188	sc->sc_txrix = rix;
2189	try0 = params->ibp_try0;
2190	ismrr = (params->ibp_try1 != 0);
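	/*
	 * NB: ibp_pri packs two values - bits 0-1 carry the WME
	 * priority (masked off above) and the remaining bits select
	 * the TX antenna, with 0 meaning "use the default
	 * sc_txantenna".
	 */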
2191	txantenna = params->ibp_pri >> 2;
2192	if (txantenna == 0)			/* XXX? */
2193		txantenna = sc->sc_txantenna;
2194
2195	/*
2196	 * Since ctsrate is fixed, store it away for later
2197	 * use when the descriptor fields are being set.
2198	 */
2199	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2200		bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2201
2202	/*
2203	 * NB: we mark all packets as type PSPOLL so the h/w won't
2204	 * set the sequence number, duration, etc.
2205	 */
2206	atype = HAL_PKT_TYPE_PSPOLL;
2207
2208	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2209		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2210		    sc->sc_hwmap[rix].ieeerate, -1);
2211
2212	if (ieee80211_radiotap_active_vap(vap)) {
2213		u_int64_t tsf = ath_hal_gettsf64(ah);
2214
2215		sc->sc_tx_th.wt_tsf = htole64(tsf);
2216		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2217		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2218			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2219		if (m0->m_flags & M_FRAG)
2220			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2221		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2222		sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2223		    ieee80211_get_node_txpower(ni));
2224		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2225
2226		ieee80211_radiotap_tx(vap, m0);
2227	}
2228
2229	/*
2230	 * Formulate first tx descriptor with tx controls.
2231	 */
2232	ds = bf->bf_desc;
2233	/* XXX check return value? */
2234
2235	/* Store the decided rate index values away */
2236	bf->bf_state.bfs_pktlen = pktlen;
2237	bf->bf_state.bfs_hdrlen = hdrlen;
2238	bf->bf_state.bfs_atype = atype;
2239	bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2240	    ieee80211_get_node_txpower(ni));
2241	bf->bf_state.bfs_txrate0 = txrate;
2242	bf->bf_state.bfs_try0 = try0;
2243	bf->bf_state.bfs_keyix = keyix;
2244	bf->bf_state.bfs_txantenna = txantenna;
2245	bf->bf_state.bfs_txflags = flags;
2246	bf->bf_state.bfs_shpream =
2247	    !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2248
2249	/* Set local packet state, used to queue packets to hardware */
2250	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2251	bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2252	bf->bf_state.bfs_pri = pri;
2253
2254	/* XXX this should be done in ath_tx_setrate() */
2255	bf->bf_state.bfs_ctsrate = 0;
2256	bf->bf_state.bfs_ctsduration = 0;
2257	bf->bf_state.bfs_ismrr = ismrr;
2258
2259	/* Blank the legacy rate array */
2260	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2261
2262	bf->bf_state.bfs_rc[0].rix =
2263	    ath_tx_findrix(sc, params->ibp_rate0);
2264	bf->bf_state.bfs_rc[0].tries = try0;
2265	bf->bf_state.bfs_rc[0].ratecode = txrate;
2266
2267	if (ismrr) {
2268		int rix;
2269
2270		rix = ath_tx_findrix(sc, params->ibp_rate1);
2271		bf->bf_state.bfs_rc[1].rix = rix;
2272		bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2273
2274		rix = ath_tx_findrix(sc, params->ibp_rate2);
2275		bf->bf_state.bfs_rc[2].rix = rix;
2276		bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2277
2278		rix = ath_tx_findrix(sc, params->ibp_rate3);
2279		bf->bf_state.bfs_rc[3].rix = rix;
2280		bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2281	}
2282	/*
2283	 * All the required rate control decisions have been made;
2284	 * fill in the rc flags.
2285	 */
2286	ath_tx_rate_fill_rcflags(sc, bf);
2287
2288	/* NB: no buffered multicast in power save support */
2289
2290	/*
2291	 * If we're overriding the ADDBA destination, dump directly
2292	 * into the hardware queue, right after any pending
2293	 * frames to that node.
2294	 */
2295	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2296	    __func__, do_override);
2297
2298#if 1
2299	/*
2300	 * Put addba frames in the right place in the right TID/HWQ.
2301	 */
2302	if (do_override) {
2303		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2304		/*
2305		 * XXX if it's addba frames, should we be leaking
2306		 * them out via the frame leak method?
2307		 * XXX for now let's not risk it; but we may wish
2308		 * to investigate this later.
2309		 */
2310		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2311	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2312	    &queue_to_head)) {
2313		/* Queue to software queue */
2314		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2315	} else {
2316		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2317		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2318	}
2319#else
2320	/* Direct-dispatch to the hardware */
2321	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2322	/*
2323	 * Update the current leak count if
2324	 * we're leaking frames; and set the
2325	 * MORE flag as appropriate.
2326	 */
2327	ath_tx_leak_count_update(sc, tid, bf);
2328	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2329#endif
2330	return 0;
2331}
2332
2333/*
2334 * Send a raw frame.
2335 *
2336 * This can be called by net80211.
2337 */
2338int
2339ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2340	const struct ieee80211_bpf_params *params)
2341{
2342	struct ieee80211com *ic = ni->ni_ic;
2343	struct ifnet *ifp = ic->ic_ifp;
2344	struct ath_softc *sc = ifp->if_softc;
2345	struct ath_buf *bf;
2346	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2347	int error = 0;
2348
2349	ATH_PCU_LOCK(sc);
2350	if (sc->sc_inreset_cnt > 0) {
2351		DPRINTF(sc, ATH_DEBUG_XMIT,
2352		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2353		error = EIO;
2354		ATH_PCU_UNLOCK(sc);
2355		goto bad0;
2356	}
2357	sc->sc_txstart_cnt++;
2358	ATH_PCU_UNLOCK(sc);
2359
2360	ATH_TX_LOCK(sc);
2361
2362	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
2363		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
2364		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
2365			"!running" : "invalid");
2366		m_freem(m);
2367		error = ENETDOWN;
2368		goto bad;
2369	}
2370
2371	/*
2372	 * Enforce how deep the multicast queue can grow.
2373	 *
2374	 * XXX duplicated in ath_tx_start().
2375	 */
2376	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2377		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2378		    > sc->sc_txq_mcastq_maxdepth) {
2379			sc->sc_stats.ast_tx_mcastq_overflow++;
2380			error = ENOBUFS;
2381		}
2382
2383		if (error != 0) {
2384			m_freem(m);
2385			goto bad;
2386		}
2387	}
2388
2389	/*
2390	 * Grab a TX buffer and associated resources.
2391	 */
2392	bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2393	if (bf == NULL) {
2394		sc->sc_stats.ast_tx_nobuf++;
2395		m_freem(m);
2396		error = ENOBUFS;
2397		goto bad;
2398	}
2399	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2400	    m, params,  bf);
2401
2402	if (params == NULL) {
2403		/*
2404		 * Legacy path; interpret frame contents to decide
2405		 * precisely how to send the frame.
2406		 */
2407		if (ath_tx_start(sc, ni, bf, m)) {
2408			error = EIO;		/* XXX */
2409			goto bad2;
2410		}
2411	} else {
2412		/*
2413		 * Caller supplied explicit parameters to use in
2414		 * sending the frame.
2415		 */
2416		if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2417			error = EIO;		/* XXX */
2418			goto bad2;
2419		}
2420	}
2421	sc->sc_wd_timer = 5;
2422	ifp->if_opackets++;
2423	sc->sc_stats.ast_tx_raw++;
2424
2425	/*
2426	 * Update the TIM - if there's anything queued to the
2427	 * software queue and power save is enabled, we should
2428	 * set the TIM.
2429	 */
2430	ath_tx_update_tim(sc, ni, 1);
2431
2432	ATH_TX_UNLOCK(sc);
2433
2434	ATH_PCU_LOCK(sc);
2435	sc->sc_txstart_cnt--;
2436	ATH_PCU_UNLOCK(sc);
2437
2438	return 0;
2439bad2:
2440	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2441	    "bf=%p",
2442	    m,
2443	    params,
2444	    bf);
2445	ATH_TXBUF_LOCK(sc);
2446	ath_returnbuf_head(sc, bf);
2447	ATH_TXBUF_UNLOCK(sc);
2448bad:
2449
2450	ATH_TX_UNLOCK(sc);
2451
2452	ATH_PCU_LOCK(sc);
2453	sc->sc_txstart_cnt--;
2454	ATH_PCU_UNLOCK(sc);
2455bad0:
2456	ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2457	    m, params);
2458	ifp->if_oerrors++;
2459	sc->sc_stats.ast_tx_raw_fail++;
2460	ieee80211_free_node(ni);
2461
2462	return error;
2463}
2464
2465/* Some helper functions */
2466
2467/*
2468 * ADDBA (and potentially others) need to be placed in the same
2469 * hardware queue as the TID/node it's relating to. This is so
2470 * it goes out after any pending non-aggregate frames to the
2471 * same node/TID.
2472 *
2473 * If this isn't done, the ADDBA can go out before the frames
2474 * queued in hardware. Even though these frames have sequence
2475 * numbers -earlier- than the ADDBA (and no frames whose
2476 * sequence numbers are after the ADDBA should have been queued),
2477 * they'll arrive after the ADDBA - and the receiving end
2478 * will simply drop them as being out of the BAW.
2479 *
2480 * The frames can't be appended to the TID software queue - they'd
2481 * never be sent out. So these frames have to be directly
2482 * dispatched to the hardware, rather than queued in software.
2483 * So if this function returns true, the TXQ has to be
2484 * overridden and it has to be directly dispatched.
2485 *
2486 * It's a dirty hack, but someone's gotta do it.
2487 */
2488
2489/*
2490 * XXX doesn't belong here!
2491 */
2492static int
2493ieee80211_is_action(struct ieee80211_frame *wh)
2494{
2495	/* Type: Management frame? */
2496	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2497	    IEEE80211_FC0_TYPE_MGT)
2498		return 0;
2499
2500	/* Subtype: Action frame? */
2501	if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2502	    IEEE80211_FC0_SUBTYPE_ACTION)
2503		return 0;
2504
2505	return 1;
2506}
2507
2508#define	MS(_v, _f)	(((_v) & _f) >> _f##_S)
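/*
 * MS() extracts a bit field: mask the value with _f, then shift right
 * by the matching _f_S shift constant.  Below it pulls the TID out of
 * the ADDBA parameter set via MS(baparamset, IEEE80211_BAPS_TID).
 */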
2509/*
2510 * Return an alternate TID for ADDBA request frames.
2511 *
2512 * Yes, this likely should be done in the net80211 layer.
2513 */
2514static int
2515ath_tx_action_frame_override_queue(struct ath_softc *sc,
2516    struct ieee80211_node *ni,
2517    struct mbuf *m0, int *tid)
2518{
2519	struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2520	struct ieee80211_action_ba_addbarequest *ia;
2521	uint8_t *frm;
2522	uint16_t baparamset;
2523
2524	/* Not action frame? Bail */
2525	if (! ieee80211_is_action(wh))
2526		return 0;
2527
2528	/* XXX Not needed for frames we send? */
2529#if 0
2530	/* Correct length? */
2531	if (! ieee80211_parse_action(ni, m))
2532		return 0;
2533#endif
2534
2535	/* Extract out action frame */
2536	frm = (u_int8_t *)&wh[1];
2537	ia = (struct ieee80211_action_ba_addbarequest *) frm;
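	/*
	 * The action frame body begins immediately after the fixed
	 * 802.11 header - hence &wh[1] - and is overlaid with the
	 * ADDBA request structure to reach the parameter set.
	 */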
2538
2539	/* Not ADDBA? Bail */
2540	if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2541		return 0;
2542	if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2543		return 0;
2544
2545	/* Extract TID, return it */
2546	baparamset = le16toh(ia->rq_baparamset);
2547	*tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
2548
2549	return 1;
2550}
2551#undef	MS
2552
2553/* Per-node software queue operations */
2554
2555/*
2556 * Add the current packet to the given BAW.
2557 * It is assumed that the current packet
2558 *
2559 * + fits inside the BAW;
2560 * + already has had a sequence number allocated.
2561 *
2562 * Since the BAW status may be modified by both the ath task and
2563 * the net80211/ifnet contexts, the TID must be locked.
2564 */
2565void
2566ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2567    struct ath_tid *tid, struct ath_buf *bf)
2568{
2569	int index, cindex;
2570	struct ieee80211_tx_ampdu *tap;
2571
2572	ATH_TX_LOCK_ASSERT(sc);
2573
2574	if (bf->bf_state.bfs_isretried)
2575		return;
2576
2577	tap = ath_tx_get_tx_tid(an, tid->tid);
2578
2579	if (! bf->bf_state.bfs_dobaw) {
2580		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2581		    "%s: dobaw=0, seqno=%d, window %d:%d\n",
2582		    __func__, SEQNO(bf->bf_state.bfs_seqno),
2583		    tap->txa_start, tap->txa_wnd);
2584	}
2585
2586	if (bf->bf_state.bfs_addedbaw)
2587		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2588		    "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2589		    "baw head=%d tail=%d\n",
2590		    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2591		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2592		    tid->baw_tail);
2593
2594	/*
2595	 * Verify that the given sequence number is not outside of the
2596	 * BAW.  Complain loudly if that's the case.
2597	 */
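	/*
	 * BAW_WITHIN(start, wnd, seq) effectively tests whether seq
	 * lies in [start, start + wnd) modulo the sequence space.
	 */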
2598	if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2599	    SEQNO(bf->bf_state.bfs_seqno))) {
2600		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2601		    "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2602		    "baw head=%d tail=%d\n",
2603		    __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2604		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2605		    tid->baw_tail);
2606	}
2607
2608	/*
2609	 * ni->ni_txseqs[] is the currently allocated seqno.
2610	 * the txa state contains the current baw start.
2611	 */
2612	index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2613	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
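	/*
	 * Worked example (hypothetical values): txa_start=100 and
	 * seqno=103 give index=3; with baw_head=10, cindex becomes
	 * (10 + 3) & (ATH_TID_MAX_BUFS - 1) == 13.  The mask is only
	 * a valid modulo because ATH_TID_MAX_BUFS is a power of two.
	 */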
2614	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2615	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2616	    "baw head=%d tail=%d\n",
2617	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2618	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2619	    tid->baw_tail);
2620
2621
2622#if 0
2623	assert(tid->tx_buf[cindex] == NULL);
2624#endif
2625	if (tid->tx_buf[cindex] != NULL) {
2626		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2627		    "%s: ba packet dup (index=%d, cindex=%d, "
2628		    "head=%d, tail=%d)\n",
2629		    __func__, index, cindex, tid->baw_head, tid->baw_tail);
2630		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2631		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2632		    __func__,
2633		    tid->tx_buf[cindex],
2634		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2635		    bf,
2636		    SEQNO(bf->bf_state.bfs_seqno)
2637		);
2638	}
2639	tid->tx_buf[cindex] = bf;
2640
2641	if (index >= ((tid->baw_tail - tid->baw_head) &
2642	    (ATH_TID_MAX_BUFS - 1))) {
2643		tid->baw_tail = cindex;
2644		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2645	}
2646}
2647
2648/*
2649 * Flip the BAW buffer entry over from the existing one to the new one.
2650 *
2651 * When software retransmitting a (sub-)frame, it is entirely possible that
2652 * the frame ath_buf is marked as BUSY and can't be immediately reused.
2653 * In that instance the buffer is cloned and the new buffer is used for
2654 * retransmit. We thus need to update the ath_buf slot in the BAW buf
2655 * tracking array to maintain consistency.
2656 */
2657static void
2658ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2659    struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2660{
2661	int index, cindex;
2662	struct ieee80211_tx_ampdu *tap;
2663	int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2664
2665	ATH_TX_LOCK_ASSERT(sc);
2666
2667	tap = ath_tx_get_tx_tid(an, tid->tid);
2668	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2669	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2670
2671	/*
2672	 * Just warn for now; if it happens then we should find out
2673	 * about it. It's highly likely the aggregation session will
2674	 * soon hang.
2675	 */
2676	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2677		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2678		    "%s: retransmitted buffer"
2679		    " has mismatching seqnos; BA session may hang.\n",
2680		    __func__);
2681		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2682		    "%s: old seqno=%d, new_seqno=%d\n", __func__,
2683		    old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2684	}
2685
2686	if (tid->tx_buf[cindex] != old_bf) {
2687		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2688		    "%s: ath_buf pointer incorrect; "
2689		    "BA session may hang.\n", __func__);
2690		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2691		    "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2692	}
2693
2694	tid->tx_buf[cindex] = new_bf;
2695}
2696
2697/*
2698 * seq_start - left edge of BAW
2699 * seq_next - current/next sequence number to allocate
2700 *
2701 * Since the BAW status may be modified by both the ath task and
2702 * the net80211/ifnet contexts, the TID must be locked.
2703 */
2704static void
2705ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2706    struct ath_tid *tid, const struct ath_buf *bf)
2707{
2708	int index, cindex;
2709	struct ieee80211_tx_ampdu *tap;
2710	int seqno = SEQNO(bf->bf_state.bfs_seqno);
2711
2712	ATH_TX_LOCK_ASSERT(sc);
2713
2714	tap = ath_tx_get_tx_tid(an, tid->tid);
2715	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2716	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2717
2718	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2719	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2720	    "baw head=%d, tail=%d\n",
2721	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2722	    cindex, tid->baw_head, tid->baw_tail);
2723
2724	/*
2725	 * If this occurs then we have a big problem - something else
2726	 * has slid tap->txa_start along without updating the BAW
2727	 * tracking start/end pointers. Thus the TX BAW state is now
2728	 * completely busted.
2729	 *
2730	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2731	 * it's quite possible that a cloned buffer is making its way
2732	 * here and causing it to fire off. Disable TDMA for now.
2733	 */
2734	if (tid->tx_buf[cindex] != bf) {
2735		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2736		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2737		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2738		    tid->tx_buf[cindex],
2739		    (tid->tx_buf[cindex] != NULL) ?
2740		      SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2741	}
2742
2743	tid->tx_buf[cindex] = NULL;
2744
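	/*
	 * Slide the BAW left edge forward over contiguous completed
	 * slots: each step advances both baw_head and txa_start by one
	 * sequence number, stopping at the first slot still in flight.
	 */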
2745	while (tid->baw_head != tid->baw_tail &&
2746	    !tid->tx_buf[tid->baw_head]) {
2747		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2748		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2749	}
2750	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2751	    "%s: baw is now %d:%d, baw head=%d\n",
2752	    __func__, tap->txa_start, tap->txa_wnd, tid->baw_head);
2753}
2754
2755static void
2756ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2757    struct ath_buf *bf)
2758{
2759	struct ieee80211_frame *wh;
2760
2761	ATH_TX_LOCK_ASSERT(sc);
2762
2763	if (tid->an->an_leak_count > 0) {
2764		wh = mtod(bf->bf_m, struct ieee80211_frame *);
2765
2766		/*
2767		 * Update MORE based on the software/net80211 queue states.
2768		 */
2769		if ((tid->an->an_stack_psq > 0)
2770		    || (tid->an->an_swq_depth > 0))
2771			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2772		else
2773			wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2774
2775		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2776		    "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2777		    __func__,
2778		    tid->an->an_node.ni_macaddr,
2779		    ":",
2780		    tid->an->an_leak_count,
2781		    tid->an->an_stack_psq,
2782		    tid->an->an_swq_depth,
2783		    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2784
2785		/*
2786		 * Re-sync the underlying buffer.
2787		 */
2788		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2789		    BUS_DMASYNC_PREWRITE);
2790
2791		tid->an->an_leak_count--;
2792	}
2793}
2794
2795static int
2796ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2797{
2798
2799	ATH_TX_LOCK_ASSERT(sc);
2800
2801	if (tid->an->an_leak_count > 0) {
2802		return (1);
2803	}
2804	if (tid->paused)
2805		return (0);
2806	return (1);
2807}
2808
2809/*
2810 * Mark the current node/TID as ready to TX.
2811 *
2812 * This is done to make it easy for the software scheduler to
2813 * find which nodes have data to send.
2814 *
2815 * The TXQ lock must be held.
2816 */
2817void
2818ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2819{
2820	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2821
2822	ATH_TX_LOCK_ASSERT(sc);
2823
2824	/*
2825	 * If we are leaking out a frame to this destination
2826	 * for PS-POLL, ensure that we allow scheduling to
2827	 * occur.
2828	 */
2829	if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2830		return;		/* paused, can't schedule yet */
2831
2832	if (tid->sched)
2833		return;		/* already scheduled */
2834
2835	tid->sched = 1;
2836
2837#if 0
2838	/*
2839	 * If this is a sleeping node we're leaking to, given
2840	 * If this is a sleeping node we're leaking to, give
2841	 */
2842	if (tid->an->an_leak_count) {
2843		TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2844	} else {
2845		TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2846	}
2847#endif
2848
2849	/*
2850	 * We can't do the above - it'll confuse the TXQ software
2851	 * scheduler which will keep checking the _head_ TID
2852	 * in the list to see if it has traffic.  If we queue
2853	 * a TID to the head of the list and it doesn't transmit,
2854	 * we'll check it again.
2855	 *
2856	 * So, get the rest of this frame leaking support working
2857	 * reliably first and _then_ optimise it so they're
2858	 * pushed out in front of any other pending software
2859	 * queued nodes.
2860	 */
2861	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2862}
2863
2864/*
2865 * Mark the current node as no longer needing to be polled for
2866 * TX packets.
2867 *
2868 * The TXQ lock must be held.
2869 */
2870static void
2871ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2872{
2873	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2874
2875	ATH_TX_LOCK_ASSERT(sc);
2876
2877	if (tid->sched == 0)
2878		return;
2879
2880	tid->sched = 0;
2881	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2882}
2883
2884/*
2885 * Assign a sequence number manually to the given frame.
2886 *
2887 * This should only be called for A-MPDU TX frames.
2888 */
2889static ieee80211_seq
2890ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2891    struct ath_buf *bf, struct mbuf *m0)
2892{
2893	struct ieee80211_frame *wh;
2894	int tid, pri;
2895	ieee80211_seq seqno;
2896	uint8_t subtype;
2897
2898	/* TID lookup */
2899	wh = mtod(m0, struct ieee80211_frame *);
2900	pri = M_WME_GETAC(m0);			/* honor classification */
2901	tid = WME_AC_TO_TID(pri);
2902	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
2903	    __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
2904
2905	/* XXX Is it a control frame? Ignore */
2906
2907	/* Does the packet require a sequence number? */
2908	if (! IEEE80211_QOS_HAS_SEQ(wh))
2909		return -1;
2910
2911	ATH_TX_LOCK_ASSERT(sc);
2912
2913	/*
2914	 * Is it a QOS NULL Data frame? Give it a sequence number from
2915	 * the default TID (IEEE80211_NONQOS_TID.)
2916	 *
2917	 * The RX path of everything I've looked at doesn't include the NULL
2918	 * data frame sequence number in the aggregation state updates, so
2919	 * assigning it a sequence number from this TID will cause a BAW hole on the
2920	 * RX side.
2921	 */
2922	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2923	if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2924		/* XXX no locking for this TID? This is a bit of a problem. */
2925		seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2926		INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2927	} else {
2928		/* Manually assign sequence number */
2929		seqno = ni->ni_txseqs[tid];
2930		INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
2931	}
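	/*
	 * The 802.11 sequence control field keeps the fragment number
	 * in its low bits, so shift the sequence number up by
	 * IEEE80211_SEQ_SEQ_SHIFT before writing it into i_seq.
	 */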
2932	*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
2933	M_SEQNO_SET(m0, seqno);
2934
2935	/* Return so caller can do something with it if needed */
2936	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s:  -> seqno=%d\n", __func__, seqno);
2937	return seqno;
2938}
2939
2940/*
2941 * Attempt to direct dispatch an aggregate frame to hardware.
2942 * If the frame is out of BAW, queue.
2943 * Otherwise, schedule it as a single frame.
2944 */
2945static void
2946ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
2947    struct ath_txq *txq, struct ath_buf *bf)
2948{
2949	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
2950	struct ieee80211_tx_ampdu *tap;
2951
2952	ATH_TX_LOCK_ASSERT(sc);
2953
2954	tap = ath_tx_get_tx_tid(an, tid->tid);
2955
2956	/* paused? queue */
2957	if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
2958		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2959		/* XXX don't sched - we're paused! */
2960		return;
2961	}
2962
2963	/* outside baw? queue */
2964	if (bf->bf_state.bfs_dobaw &&
2965	    (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2966	    SEQNO(bf->bf_state.bfs_seqno)))) {
2967		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2968		ath_tx_tid_sched(sc, tid);
2969		return;
2970	}
2971
2972	/*
2973	 * This is a temporary check and should be removed once
2974	 * all the relevant code paths have been fixed.
2975	 *
2976	 * During aggregate retries, it's possible that the head
2977	 * frame will fail (which has the bfs_aggr and bfs_nframes
2978	 * fields set for said aggregate) and will be retried as
2979	 * a single frame.  In this instance, the values should
2980	 * be reset or the completion code will get upset with you.
2981	 */
2982	if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
2983		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
2984		    "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
2985		    bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
2986		bf->bf_state.bfs_aggr = 0;
2987		bf->bf_state.bfs_nframes = 1;
2988	}
2989
2990	/* Update CLRDMASK just before this frame is queued */
2991	ath_tx_update_clrdmask(sc, tid, bf);
2992
2993	/* Direct dispatch to hardware */
2994	ath_tx_do_ratelookup(sc, bf);
2995	ath_tx_calc_duration(sc, bf);
2996	ath_tx_calc_protection(sc, bf);
2997	ath_tx_set_rtscts(sc, bf);
2998	ath_tx_rate_fill_rcflags(sc, bf);
2999	ath_tx_setds(sc, bf);
3000
3001	/* Statistics */
3002	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3003
3004	/* Track per-TID hardware queue depth correctly */
3005	tid->hwq_depth++;
3006
3007	/* Add to BAW */
3008	if (bf->bf_state.bfs_dobaw) {
3009		ath_tx_addto_baw(sc, an, tid, bf);
3010		bf->bf_state.bfs_addedbaw = 1;
3011	}
3012
3013	/* Set completion handler, multi-frame aggregate or not */
3014	bf->bf_comp = ath_tx_aggr_comp;
3015
3016	/*
3017	 * Update the current leak count if
3018	 * we're leaking frames; and set the
3019	 * MORE flag as appropriate.
3020	 */
3021	ath_tx_leak_count_update(sc, tid, bf);
3022
3023	/* Hand off to hardware */
3024	ath_tx_handoff(sc, txq, bf);
3025}
3026
3027/*
3028 * Attempt to send the packet.
3029 * If the queue isn't busy, direct-dispatch.
3030 * If the queue is busy enough, queue the given packet on the
3031 *  relevant software queue.
3032 */
3033void
3034ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3035    struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3036{
3037	struct ath_node *an = ATH_NODE(ni);
3038	struct ieee80211_frame *wh;
3039	struct ath_tid *atid;
3040	int pri, tid;
3041	struct mbuf *m0 = bf->bf_m;
3042
3043	ATH_TX_LOCK_ASSERT(sc);
3044
3045	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
3046	wh = mtod(m0, struct ieee80211_frame *);
3047	pri = ath_tx_getac(sc, m0);
3048	tid = ath_tx_gettid(sc, m0);
3049	atid = &an->an_tid[tid];
3050
3051	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3052	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3053
3054	/* Set local packet state, used to queue packets to hardware */
3055	/* XXX potentially duplicate info, re-check */
3056	bf->bf_state.bfs_tid = tid;
3057	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3058	bf->bf_state.bfs_pri = pri;
3059
3060	/*
3061	 * If the hardware queue isn't busy, direct dispatch it.
3062	 * If the hardware queue is busy, software queue it.
3063	 * If the TID is paused or the traffic is outside the BAW, software
3064	 * queue it.
3065	 *
3066	 * If the node is in power-save and we're leaking a frame,
3067	 * leak a single frame.
3068	 */
3069	if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3070		/* TID is paused, queue */
3071		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3072		/*
3073		 * If the caller requested that it be sent at a high
3074		 * priority, queue it at the head of the list.
3075		 */
3076		if (queue_to_head)
3077			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3078		else
3079			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3080	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
3081		/* AMPDU pending; queue */
3082		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3083		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3084		/* XXX sched? */
3085	} else if (ath_tx_ampdu_running(sc, an, tid)) {
3086		/* AMPDU running, attempt direct dispatch if possible */
3087
3088		/*
3089		 * Always queue the frame to the tail of the list.
3090		 */
3091		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3092
3093		/*
3094		 * If the hardware queue isn't busy, direct dispatch
3095		 * the head frame in the list.  Don't schedule the
3096		 * TID - let it build some more frames first?
3097		 *
3098		 * When running A-MPDU, always just check the hardware
3099		 * queue depth against the aggregate frame limit.
3100		 * We don't want to burst a large number of single frames
3101		 * out to the hardware; we want to aggressively hold back.
3102		 *
3103		 * Otherwise, schedule the TID.
3104		 */
3105		/* XXX TXQ locking */
3106		if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3107			bf = ATH_TID_FIRST(atid);
3108			ATH_TID_REMOVE(atid, bf, bf_list);
3109
3110			/*
3111			 * Ensure it's definitely treated as a non-AMPDU
3112			 * frame - this information may have been left
3113			 * over from a previous attempt.
3114			 */
3115			bf->bf_state.bfs_aggr = 0;
3116			bf->bf_state.bfs_nframes = 1;
3117
3118			/* Queue to the hardware */
3119			ath_tx_xmit_aggr(sc, an, txq, bf);
3120			DPRINTF(sc, ATH_DEBUG_SW_TX,
3121			    "%s: xmit_aggr\n",
3122			    __func__);
3123		} else {
3124			DPRINTF(sc, ATH_DEBUG_SW_TX,
3125			    "%s: ampdu; swq'ing\n",
3126			    __func__);
3127
3128			ath_tx_tid_sched(sc, atid);
3129		}
3130	/*
3131	 * If we're not doing A-MPDU, be prepared to direct dispatch
3132	 * up to both limits if possible.  This particular corner
3133	 * case may end up with packet starvation between aggregate
3134	 * traffic and non-aggregate traffic: we want to ensure
3135	 * that non-aggregate stations get a few frames queued to the
3136	 * hardware before the aggregate station(s) get their chance.
3137	 *
3138	 * So if you only ever see a couple of frames direct dispatched
3139	 * to the hardware from a non-AMPDU client, check both here
3140	 * and in the software queue dispatcher to ensure that those
3141	 * non-AMPDU stations get a fair chance to transmit.
3142	 */
3143	/* XXX TXQ locking */
3144	} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3145		    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3146		/* AMPDU not running, attempt direct dispatch */
3147		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3148		/* See if clrdmask needs to be set */
3149		ath_tx_update_clrdmask(sc, atid, bf);
3150
3151		/*
3152		 * Update the current leak count if
3153		 * we're leaking frames; and set the
3154		 * MORE flag as appropriate.
3155		 */
3156		ath_tx_leak_count_update(sc, atid, bf);
3157
3158		/*
3159		 * Dispatch the frame.
3160		 */
3161		ath_tx_xmit_normal(sc, txq, bf);
3162	} else {
3163		/* Busy; queue */
3164		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3165		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3166		ath_tx_tid_sched(sc, atid);
3167	}
3168}
3169
3170/*
3171 * Only set the clrdmask bit if none of the node's TIDs are currently
3172 * filtered.
3173 *
3174 * XXX TODO: go through all the callers and check to see
3175 * which are being called in the context of looping over all
3176 * TIDs (eg, if all tids are being paused, resumed, etc.)
3177 * That'll avoid O(n^2) complexity here.
3178 */
3179static void
3180ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3181{
3182	int i;
3183
3184	ATH_TX_LOCK_ASSERT(sc);
3185
3186	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3187		if (an->an_tid[i].isfiltered == 1)
3188			return;
3189	}
3190	an->clrdmask = 1;
3191}
3192
3193/*
3194 * Configure the per-TID node state.
3195 *
3196 * This likely belongs in if_ath_node.c but I can't think of anywhere
3197 * else to put it just yet.
3198 *
3199 * This sets up the SLISTs and the mutex as appropriate.
3200 */
3201void
3202ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3203{
3204	int i, j;
3205	struct ath_tid *atid;
3206
3207	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3208		atid = &an->an_tid[i];
3209
3210		/* XXX now with this bzero(), is the field 0'ing needed? */
3211		bzero(atid, sizeof(*atid));
3212
3213		TAILQ_INIT(&atid->tid_q);
3214		TAILQ_INIT(&atid->filtq.tid_q);
3215		atid->tid = i;
3216		atid->an = an;
3217		for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3218			atid->tx_buf[j] = NULL;
3219		atid->baw_head = atid->baw_tail = 0;
3220		atid->paused = 0;
3221		atid->sched = 0;
3222		atid->hwq_depth = 0;
3223		atid->cleanup_inprogress = 0;
3224		if (i == IEEE80211_NONQOS_TID)
3225			atid->ac = ATH_NONQOS_TID_AC;
3226		else
3227			atid->ac = TID_TO_WME_AC(i);
3228	}
3229	an->clrdmask = 1;	/* Always start by setting this bit */
3230}
3231
3232/*
3233 * Pause the current TID. This stops packets from being transmitted
3234 * on it.
3235 *
3236 * Since this is also called from upper layers as well as the driver,
3237 * it will get the TID lock.
3238 */
3239static void
3240ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3241{
3242
3243	ATH_TX_LOCK_ASSERT(sc);
3244	tid->paused++;
3245	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n",
3246	    __func__, tid->paused);
3247}
3248
3249/*
3250 * Unpause the current TID, and schedule it if needed.
3251 */
3252static void
3253ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3254{
3255	ATH_TX_LOCK_ASSERT(sc);
3256
3257	/*
3258	 * There are some odd places where ath_tx_tid_resume() is called
3259	 * when it shouldn't be; this works around that particular issue
3260	 * until it's actually resolved.
3261	 */
3262	if (tid->paused == 0) {
3263		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3264		    "%s: %6D: paused=0?\n", __func__,
3265		    tid->an->an_node.ni_macaddr, ":");
3266	} else {
3267		tid->paused--;
3268	}
3269
3270	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n",
3271	    __func__, tid->paused);
3272
3273	if (tid->paused)
3274		return;
3275
3276	/*
3277	 * Override the clrdmask configuration for the next frame
3278	 * from this TID, just to get the ball rolling.
3279	 */
3280	ath_tx_set_clrdmask(sc, tid->an);
3281
3282	if (tid->axq_depth == 0)
3283		return;
3284
3285	/* XXX isfiltered shouldn't ever be 1 at this point */
3286	if (tid->isfiltered == 1) {
3287		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3288		    __func__);
3289		return;
3290	}
3291
3292	ath_tx_tid_sched(sc, tid);
3293
3294	/*
3295	 * Queue the software TX scheduler.
3296	 */
3297	ath_tx_swq_kick(sc);
3298}
3299
3300/*
3301 * Add the given ath_buf to the TID filtered frame list.
3302 * This requires the TID be filtered.
3303 */
3304static void
3305ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3306    struct ath_buf *bf)
3307{
3308
3309	ATH_TX_LOCK_ASSERT(sc);
3310
3311	if (!tid->isfiltered)
3312		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3313		    __func__);
3314
3315	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3316
3317	/* Set the retry bit and bump the retry counter */
3318	ath_tx_set_retry(sc, bf);
3319	sc->sc_stats.ast_tx_swfiltered++;
3320
3321	ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3322}
3323
3324/*
3325 * Handle a completed filtered frame from the given TID.
3326 * This just enables/pauses the filtered frame state if required
3327 * and appends the filtered frame to the filtered queue.
3328 */
3329static void
3330ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3331    struct ath_buf *bf)
3332{
3333
3334	ATH_TX_LOCK_ASSERT(sc);
3335
3336	if (! tid->isfiltered) {
3337		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n",
3338		    __func__);
3339		tid->isfiltered = 1;
3340		ath_tx_tid_pause(sc, tid);
3341	}
3342
3343	/* Add the frame to the filter queue */
3344	ath_tx_tid_filt_addbuf(sc, tid, bf);
3345}
3346
3347/*
3348 * Complete the filtered frame TX completion.
3349 *
3350 * If there are no more frames in the hardware queue, unpause/unfilter
3351 * the TID if applicable.  Otherwise we will wait for a node PS transition
3352 * to unfilter.
3353 */
3354static void
3355ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3356{
3357	struct ath_buf *bf;
3358
3359	ATH_TX_LOCK_ASSERT(sc);
3360
3361	if (tid->hwq_depth != 0)
3362		return;
3363
3364	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n",
3365	    __func__);
3366	tid->isfiltered = 0;
3367	/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3368	ath_tx_set_clrdmask(sc, tid->an);
3369
3370	/* XXX this is really quite inefficient */
3371	while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3372		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3373		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3374	}
3375
3376	ath_tx_tid_resume(sc, tid);
3377}
3378
3379/*
3380 * Called when a single (aggregate or otherwise) frame is completed.
3381 *
3382 * Returns 1 if the buffer could be added to the filtered list
3383 * (cloned or otherwise), 0 if the buffer couldn't be added to the
3384 * filtered list (failed clone; expired retry) and the caller should
3385 * free it and handle it like a failure (eg by sending a BAR.)
3386 */
3387static int
3388ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3389    struct ath_buf *bf)
3390{
3391	struct ath_buf *nbf;
3392	int retval;
3393
3394	ATH_TX_LOCK_ASSERT(sc);
3395
3396	/*
3397	 * Don't allow a filtered frame to live forever.
3398	 */
3399	if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3400		sc->sc_stats.ast_tx_swretrymax++;
3401		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3402		    "%s: bf=%p, seqno=%d, exceeded retries\n",
3403		    __func__,
3404		    bf,
3405		    bf->bf_state.bfs_seqno);
3406		return (0);
3407	}
3408
3409	/*
3410	 * A busy buffer can't be added to the retry list.
3411	 * It needs to be cloned.
3412	 */
3413	if (bf->bf_flags & ATH_BUF_BUSY) {
3414		nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3415		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3416		    "%s: busy buffer clone: %p -> %p\n",
3417		    __func__, bf, nbf);
3418	} else {
3419		nbf = bf;
3420	}
3421
3422	if (nbf == NULL) {
3423		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3424		    "%s: busy buffer couldn't be cloned (%p)!\n",
3425		    __func__, bf);
3426		retval = 1;
3427	} else {
3428		ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3429		retval = 0;
3430	}
3431	ath_tx_tid_filt_comp_complete(sc, tid);
3432
3433	return (retval);
3434}
3435
3436static void
3437ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3438    struct ath_buf *bf_first, ath_bufhead *bf_q)
3439{
3440	struct ath_buf *bf, *bf_next, *nbf;
3441
3442	ATH_TX_LOCK_ASSERT(sc);
3443
3444	bf = bf_first;
3445	while (bf) {
3446		bf_next = bf->bf_next;
3447		bf->bf_next = NULL;	/* Remove it from the aggr list */
3448
3449		/*
3450		 * Don't allow a filtered frame to live forever.
3451		 */
3452		if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3453			sc->sc_stats.ast_tx_swretrymax++;
3454			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3455			    "%s: bf=%p, seqno=%d, exceeded retries\n",
3456			    __func__,
3457			    bf,
3458			    bf->bf_state.bfs_seqno);
3459			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3460			goto next;
3461		}
3462
3463		if (bf->bf_flags & ATH_BUF_BUSY) {
3464			nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3465			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3466			    "%s: busy buffer cloned: %p -> %p\n",
3467			    __func__, bf, nbf);
3468		} else {
3469			nbf = bf;
3470		}
3471
3472		/*
3473		 * If the buffer couldn't be cloned, add it to bf_q;
3474		 * the caller will free the buffer(s) as required.
3475		 */
3476		if (nbf == NULL) {
3477			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3478			    "%s: buffer couldn't be cloned! (%p)\n",
3479			    __func__, bf);
3480			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3481		} else {
3482			ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3483		}
3484next:
3485		bf = bf_next;
3486	}
3487
3488	ath_tx_tid_filt_comp_complete(sc, tid);
3489}
3490
3491/*
3492 * Suspend the queue because we need to TX a BAR.
3493 */
3494static void
3495ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3496{
3497
3498	ATH_TX_LOCK_ASSERT(sc);
3499
3500	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3501	    "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3502	    __func__,
3503	    tid->tid,
3504	    tid->bar_wait,
3505	    tid->bar_tx);
3506
3507	/* We shouldn't be called when bar_tx is 1 */
3508	if (tid->bar_tx) {
3509		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3510		    "%s: bar_tx is 1?!\n", __func__);
3511	}
3512
3513	/* If we've already been called, just be patient. */
3514	if (tid->bar_wait)
3515		return;
3516
3517	/* Wait! */
3518	tid->bar_wait = 1;
3519
3520	/* Only one pause, no matter how many frames fail */
3521	ath_tx_tid_pause(sc, tid);
3522}
3523
3524/*
3525 * We've finished with BAR handling - either we succeeded or
3526 * failed. Either way, unsuspend TX.
3527 */
3528static void
3529ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3530{
3531
3532	ATH_TX_LOCK_ASSERT(sc);
3533
3534	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3535	    "%s: %6D: TID=%d, called\n",
3536	    __func__,
3537	    tid->an->an_node.ni_macaddr,
3538	    ":",
3539	    tid->tid);
3540
3541	if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3542		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3543		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3544		    __func__, tid->an->an_node.ni_macaddr, ":",
3545		    tid->tid, tid->bar_tx, tid->bar_wait);
3546	}
3547
3548	tid->bar_tx = tid->bar_wait = 0;
3549	ath_tx_tid_resume(sc, tid);
3550}
3551
3552/*
3553 * Return whether we're ready to TX a BAR frame.
3554 *
3555 * Requires the TID lock be held.
3556 */
3557static int
3558ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3559{
3560
3561	ATH_TX_LOCK_ASSERT(sc);
3562
3563	if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3564		return (0);
3565
3566	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3567	    "%s: %6D: TID=%d, bar ready\n",
3568	    __func__,
3569	    tid->an->an_node.ni_macaddr,
3570	    ":",
3571	    tid->tid);
3572
3573	return (1);
3574}
3575
3576/*
3577 * Check whether the current TID is ready to have a BAR
3578 * TXed and if so, do the TX.
3579 *
3580 * Since the TID/TXQ lock can't be held during a call to
3581 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3582 * sending the BAR and locking it again.
3583 *
3584 * Eventually, the code to send the BAR should be broken out
3585 * from this routine so the lock doesn't have to be reacquired
3586 * just to be immediately dropped by the caller.
3587 */
3588static void
3589ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3590{
3591	struct ieee80211_tx_ampdu *tap;
3592
3593	ATH_TX_LOCK_ASSERT(sc);
3594
3595	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3596	    "%s: %6D: TID=%d, called\n",
3597	    __func__,
3598	    tid->an->an_node.ni_macaddr,
3599	    ":",
3600	    tid->tid);
3601
3602	tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3603
3604	/*
3605	 * This is an error condition!
3606	 */
3607	if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3608		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3609		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3610		    __func__, tid->an->an_node.ni_macaddr, ":",
3611		    tid->tid, tid->bar_tx, tid->bar_wait);
3612		return;
3613	}
3614
3615	/* Don't do anything if we still have pending frames */
3616	if (tid->hwq_depth > 0) {
3617		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3618		    "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3619		    __func__,
3620		    tid->an->an_node.ni_macaddr,
3621		    ":",
3622		    tid->tid,
3623		    tid->hwq_depth);
3624		return;
3625	}
3626
3627	/* We're now about to TX */
3628	tid->bar_tx = 1;
3629
3630	/*
3631	 * Override the clrdmask configuration for the next frame,
3632	 * just to get the ball rolling.
3633	 */
3634	ath_tx_set_clrdmask(sc, tid->an);
3635
3636	/*
3637	 * Calculate new BAW left edge, now that all frames have either
3638	 * succeeded or failed.
3639	 *
3640	 * XXX verify this is _actually_ the valid value to begin at!
3641	 */
3642	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3643	    "%s: %6D: TID=%d, new BAW left edge=%d\n",
3644	    __func__,
3645	    tid->an->an_node.ni_macaddr,
3646	    ":",
3647	    tid->tid,
3648	    tap->txa_start);
3649
3650	/* Try sending the BAR frame */
3651	/* We can't hold the lock here! */
3652
3653	ATH_TX_UNLOCK(sc);
3654	if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3655		/* Success? Now we wait for notification that it's done */
3656		ATH_TX_LOCK(sc);
3657		return;
3658	}
3659
3660	/* Failure? For now, warn loudly and continue */
3661	ATH_TX_LOCK(sc);
3662	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3663	    "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3664	    __func__, tid->an->an_node.ni_macaddr, ":",
3665	    tid->tid);
3666	ath_tx_tid_bar_unsuspend(sc, tid);
3667}
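
/*
 * Illustrative sketch of the lock dance above, since the TX lock
 * can't be held across ieee80211_send_bar(): commit the state
 * transition first, drop the lock only for the call, then
 * re-validate after re-acquiring it:
 *
 *	tid->bar_tx = 1;			(committed while locked)
 *	ATH_TX_UNLOCK(sc);
 *	error = ieee80211_send_bar(ni, tap, tap->txa_start);
 *	ATH_TX_LOCK(sc);
 *	(tid state may have changed while unlocked; re-check it)
 */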
3668
3669static void
3670ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3671    struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3672{
3673
3674	ATH_TX_LOCK_ASSERT(sc);
3675
3676	/*
3677	 * If the current TID is running AMPDU, update
3678	 * the BAW.
3679	 */
3680	if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3681	    bf->bf_state.bfs_dobaw) {
3682		/*
3683		 * Only remove the frame from the BAW if it's
3684		 * been transmitted at least once; this means
3685		 * the frame was in the BAW to begin with.
3686		 */
3687		if (bf->bf_state.bfs_retries > 0) {
3688			ath_tx_update_baw(sc, an, tid, bf);
3689			bf->bf_state.bfs_dobaw = 0;
3690		}
3691#if 0
3692		/*
3693		 * This has become a non-fatal error now
3694		 */
3695		if (! bf->bf_state.bfs_addedbaw)
3696			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3697			    "%s: wasn't added: seqno %d\n",
3698			    __func__, SEQNO(bf->bf_state.bfs_seqno));
3699#endif
3700	}
3701
3702	/* Strip it out of an aggregate list if it was in one */
3703	bf->bf_next = NULL;
3704
3705	/* Insert on the free queue to be freed by the caller */
3706	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3707}
3708
3709static void
3710ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3711    const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3712{
3713	struct ieee80211_node *ni = &an->an_node;
3714	struct ath_txq *txq;
3715	struct ieee80211_tx_ampdu *tap;
3716
3717	txq = sc->sc_ac2q[tid->ac];
3718	tap = ath_tx_get_tx_tid(an, tid->tid);
3719
3720	DPRINTF(sc, ATH_DEBUG_SW_TX,
3721	    "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3722	    "seqno=%d, retry=%d\n",
3723	    __func__,
3724	    pfx,
3725	    ni->ni_macaddr,
3726	    ":",
3727	    bf,
3728	    bf->bf_state.bfs_addedbaw,
3729	    bf->bf_state.bfs_dobaw,
3730	    SEQNO(bf->bf_state.bfs_seqno),
3731	    bf->bf_state.bfs_retries);
3732	DPRINTF(sc, ATH_DEBUG_SW_TX,
3733	    "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3734	    __func__,
3735	    pfx,
3736	    ni->ni_macaddr,
3737	    ":",
3738	    bf,
3739	    txq->axq_qnum,
3740	    txq->axq_depth,
3741	    txq->axq_aggr_depth);
3742	DPRINTF(sc, ATH_DEBUG_SW_TX,
3743	    "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3744	      "isfiltered=%d\n",
3745	    __func__,
3746	    pfx,
3747	    ni->ni_macaddr,
3748	    ":",
3749	    bf,
3750	    tid->axq_depth,
3751	    tid->hwq_depth,
3752	    tid->bar_wait,
3753	    tid->isfiltered);
3754	DPRINTF(sc, ATH_DEBUG_SW_TX,
3755	    "%s: %s: %6D: tid %d: "
3756	    "sched=%d, paused=%d, "
3757	    "incomp=%d, baw_head=%d, "
3758	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3759	     __func__,
3760	     pfx,
3761	     ni->ni_macaddr,
3762	     ":",
3763	     tid->tid,
3764	     tid->sched, tid->paused,
3765	     tid->incomp, tid->baw_head,
3766	     tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3767	     ni->ni_txseqs[tid->tid]);
3768
3769	/* XXX Dump the frame, see what it is? */
3770	ieee80211_dump_pkt(ni->ni_ic,
3771	    mtod(bf->bf_m, const uint8_t *),
3772	    bf->bf_m->m_len, 0, -1);
3773}
3774
3775/*
3776 * Free any packets currently pending in the software TX queue.
3777 *
3778 * This will be called when a node is being deleted.
3779 *
3780 * It can also be called on an active node during an interface
3781 * reset or state transition.
3782 *
3783 * (From Linux/reference):
3784 *
3785 * TODO: For frame(s) that are in the retry state, we will reuse the
3786 * sequence number(s) without setting the retry bit. The
3787 * alternative is to give up on these and BAR the receiver's window
3788 * forward.
3789 */
3790static void
3791ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3792    struct ath_tid *tid, ath_bufhead *bf_cq)
3793{
3794	struct ath_buf *bf;
3795	struct ieee80211_tx_ampdu *tap;
3796	struct ieee80211_node *ni = &an->an_node;
3797	int t;
3798
3799	tap = ath_tx_get_tx_tid(an, tid->tid);
3800
3801	ATH_TX_LOCK_ASSERT(sc);
3802
3803	/* Walk the queue, free frames */
3804	t = 0;
3805	for (;;) {
3806		bf = ATH_TID_FIRST(tid);
3807		if (bf == NULL) {
3808			break;
3809		}
3810
3811		if (t == 0) {
3812			ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3813			t = 1;
3814		}
3815
3816		ATH_TID_REMOVE(tid, bf, bf_list);
3817		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3818	}
3819
3820	/* And now, drain the filtered frame queue */
3821	t = 0;
3822	for (;;) {
3823		bf = ATH_TID_FILT_FIRST(tid);
3824		if (bf == NULL)
3825			break;
3826
3827		if (t == 0) {
3828			ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3829			t = 1;
3830		}
3831
3832		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3833		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3834	}
3835
3836	/*
3837	 * Override the clrdmask configuration for the next frame
3838	 * in case there is some future transmission, just to get
3839	 * the ball rolling.
3840	 *
3841	 * This won't hurt things if the TID is about to be freed.
3842	 */
3843	ath_tx_set_clrdmask(sc, tid->an);
3844
3845	/*
3846	 * Now that the frames have been drained, update the
3847	 * sequence number and BAW window (the TX lock is already held).
3848	 * Because sequence numbers have been assigned to frames
3849	 * that haven't been sent yet, it's entirely possible
3850	 * we'll be called with some pending frames that have not
3851	 * been transmitted.
3852	 *
3853	 * The cleaner solution is to do the sequence number allocation
3854	 * when the packet is first transmitted - and thus the "retries"
3855	 * check above would be enough to update the BAW/seqno.
3856	 */
3857
3858	/* But don't do it for non-QoS TIDs */
3859	if (tap) {
3860#if 1
3861		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3862		    "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3863		    __func__,
3864		    ni->ni_macaddr,
3865		    ":",
3866		    an,
3867		    tid->tid,
3868		    tap->txa_start);
3869#endif
3870		ni->ni_txseqs[tid->tid] = tap->txa_start;
3871		tid->baw_tail = tid->baw_head;
3872	}
3873}
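
/*
 * Worked example (illustrative) of the seqno/BAW sync above:
 * suppose seqnos 100..104 were queued, 100..101 completed (sliding
 * txa_start to 102) and 102..104 were just drained.  Setting
 * ni_txseqs[tid] = txa_start = 102 makes the next frame reuse
 * seqno 102, so new traffic starts exactly at the BAW left edge
 * instead of leaving a hole the receiver would stall on.
 */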
3874
3875/*
3876 * Reset the TID state.  This must only be called once the node has
3877 * had its frames flushed from this TID, to ensure that no other
3878 * pause / unpause logic can kick in.
3879 */
3880static void
3881ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3882{
3883
3884#if 0
3885	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3886	tid->paused = tid->sched = tid->addba_tx_pending = 0;
3887	tid->incomp = tid->cleanup_inprogress = 0;
3888#endif
3889
3890	/*
3891	 * If we have a bar_wait set, we need to unpause the TID
3892	 * here.  Otherwise once cleanup has finished, the TID won't
3893	 * have the right paused counter.
3894	 *
3895	 * XXX I'm not going through resume here - I don't want the
3896	 * node to be rescheduled just yet.  This, however, should be
3897	 * methodized!
3898	 */
3899	if (tid->bar_wait) {
3900		if (tid->paused > 0) {
3901			tid->paused--;
3902		}
3903	}
3904
3905	/*
3906	 * XXX same with a currently filtered TID.
3907	 *
3908	 * Since this is being called during a flush, we assume that
3909	 * the filtered frame list is actually empty.
3910	 *
3911	 * XXX TODO: add in a check to ensure that the filtered queue
3912	 * depth is actually 0!
3913	 */
3914	if (tid->isfiltered) {
3915		if (tid->paused > 0) {
3916			tid->paused--;
3917		}
3918	}
3919
3920	/*
3921	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3922	 * The TID may be going through cleanup from the last association
3923	 * where things in the BAW are still in the hardware queue.
3924	 */
3925	tid->bar_wait = 0;
3926	tid->bar_tx = 0;
3927	tid->isfiltered = 0;
3928	tid->sched = 0;
3929	tid->addba_tx_pending = 0;
3930
3931	/*
3932	 * XXX TODO: it may just be enough to walk the HWQs and mark
3933	 * frames for that node as non-aggregate; or mark the ath_node
3934	 * with something that indicates that aggregation is no longer
3935	 * occurring.  Then we can just toss the BAW complaints and
3936	 * do a complete hard reset of state here - no pause, no
3937	 * complete counter, etc.
3938	 */
3939
3940}
3941
3942/*
3943 * Flush all software queued packets for the given node.
3944 *
3945 * This occurs when a completion handler frees the last buffer
3946 * for a node, and the node is thus freed. This causes the node
3947 * to be cleaned up, which ends up calling ath_tx_node_flush.
3948 */
3949void
3950ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3951{
3952	int tid;
3953	ath_bufhead bf_cq;
3954	struct ath_buf *bf;
3955
3956	TAILQ_INIT(&bf_cq);
3957
3958	ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3959	    &an->an_node);
3960
3961	ATH_TX_LOCK(sc);
3962	DPRINTF(sc, ATH_DEBUG_NODE,
3963	    "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3964	    "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3965	    __func__,
3966	    an->an_node.ni_macaddr,
3967	    ":",
3968	    an->an_is_powersave,
3969	    an->an_stack_psq,
3970	    an->an_tim_set,
3971	    an->an_swq_depth,
3972	    an->clrdmask,
3973	    an->an_leak_count);
3974
3975	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
3976		struct ath_tid *atid = &an->an_tid[tid];
3977
3978		/* Free packets */
3979		ath_tx_tid_drain(sc, an, atid, &bf_cq);
3980
3981		/* Remove this tid from the list of active tids */
3982		ath_tx_tid_unsched(sc, atid);
3983
3984		/* Reset the per-TID pause, BAR, etc state */
3985		ath_tx_tid_reset(sc, atid);
3986	}
3987
3988	/*
3989	 * Clear global leak count
3990	 */
3991	an->an_leak_count = 0;
3992	ATH_TX_UNLOCK(sc);
3993
3994	/* Handle completed frames */
3995	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
3996		TAILQ_REMOVE(&bf_cq, bf, bf_list);
3997		ath_tx_default_comp(sc, bf, 0);
3998	}
3999}
4000
4001/*
4002 * Drain all the software TXQs currently with traffic queued.
4003 */
4004void
4005ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4006{
4007	struct ath_tid *tid;
4008	ath_bufhead bf_cq;
4009	struct ath_buf *bf;
4010
4011	TAILQ_INIT(&bf_cq);
4012	ATH_TX_LOCK(sc);
4013
4014	/*
4015	 * Iterate over all active tids for the given txq,
4016	 * flushing and unsched'ing them
4017	 */
4018	while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4019		tid = TAILQ_FIRST(&txq->axq_tidq);
4020		ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4021		ath_tx_tid_unsched(sc, tid);
4022	}
4023
4024	ATH_TX_UNLOCK(sc);
4025
4026	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4027		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4028		ath_tx_default_comp(sc, bf, 0);
4029	}
4030}
4031
4032/*
4033 * Handle completion of non-aggregate session frames.
4034 *
4035 * This (currently) doesn't implement software retransmission of
4036 * non-aggregate frames!
4037 *
4038 * Software retransmission of non-aggregate frames needs to obey
4039 * the strict sequence number ordering, and drop any frames that
4040 * will fail this.
4041 *
4042 * For now, filtered frames and software retransmission would cause
4043 * all kinds of issues.  So we don't support them.
4044 *
4045 * So anyone queuing frames via ath_tx_normal_xmit() or
4046 * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4047 */
4048void
4049ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4050{
4051	struct ieee80211_node *ni = bf->bf_node;
4052	struct ath_node *an = ATH_NODE(ni);
4053	int tid = bf->bf_state.bfs_tid;
4054	struct ath_tid *atid = &an->an_tid[tid];
4055	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4056
4057	/* The TID state is protected behind the TXQ lock */
4058	ATH_TX_LOCK(sc);
4059
4060	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4061	    __func__, bf, fail, atid->hwq_depth - 1);
4062
4063	atid->hwq_depth--;
4064
4065#if 0
4066	/*
4067	 * If the frame was filtered, stick it on the filter frame
4068	 * queue and complain about it.  It shouldn't happen!
4069	 */
4070	if ((ts->ts_status & HAL_TXERR_FILT) ||
4071	    (ts->ts_status != 0 && atid->isfiltered)) {
4072		DPRINTF(sc, ATH_DEBUG_SW_TX,
4073		    "%s: isfiltered=%d, ts_status=%d: huh?\n",
4074		    __func__,
4075		    atid->isfiltered,
4076		    ts->ts_status);
4077		ath_tx_tid_filt_comp_buf(sc, atid, bf);
4078	}
4079#endif
4080	if (atid->isfiltered)
4081		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4082	if (atid->hwq_depth < 0)
4083		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4084		    __func__, atid->hwq_depth);
4085
4086	/*
4087	 * If the queue is filtered, potentially mark it as complete
4088	 * and reschedule it as needed.
4089	 *
4090	 * This is required as there may be a subsequent TX descriptor
4091	 * for this end-node that has CLRDMASK set, so it's quite possible
4092	 * that a filtered frame will be followed by a non-filtered
4093	 * (complete or otherwise) frame.
4094	 *
4095	 * XXX should we do this before we complete the frame?
4096	 */
4097	if (atid->isfiltered)
4098		ath_tx_tid_filt_comp_complete(sc, atid);
4099	ATH_TX_UNLOCK(sc);
4100
4101	/*
4102	 * punt to rate control if we're not being cleaned up
4103	 * during a hw queue drain and the frame wanted an ACK.
4104	 */
4105	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4106		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4107		    ts, bf->bf_state.bfs_pktlen,
4108		    1, (ts->ts_status == 0) ? 0 : 1);
4109
4110	ath_tx_default_comp(sc, bf, fail);
4111}
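
/*
 * Background sketch, as understood from the HAL usage in this file:
 * when a frame to a destination fails, the MAC may set an internal
 * "destination filtered" bit and auto-fail subsequent frames to that
 * node with HAL_TXERR_FILT.  A descriptor carrying the CLRDMASK flag
 * clears that bit, which is why a filtered frame can be followed by
 * a perfectly normal completion:
 *
 *	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
 */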
4112
4113/*
4114 * Handle cleanup of aggregate session packets that aren't
4115 * an A-MPDU.
4116 *
4117 * There's no need to update the BAW here - the session is being
4118 * torn down.
4119 */
4120static void
4121ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4122{
4123	struct ieee80211_node *ni = bf->bf_node;
4124	struct ath_node *an = ATH_NODE(ni);
4125	int tid = bf->bf_state.bfs_tid;
4126	struct ath_tid *atid = &an->an_tid[tid];
4127
4128	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4129	    __func__, tid, atid->incomp);
4130
4131	ATH_TX_LOCK(sc);
4132	atid->incomp--;
4133	if (atid->incomp == 0) {
4134		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4135		    "%s: TID %d: cleaned up! resume!\n",
4136		    __func__, tid);
4137		atid->cleanup_inprogress = 0;
4138		ath_tx_tid_resume(sc, atid);
4139	}
4140	ATH_TX_UNLOCK(sc);
4141
4142	ath_tx_default_comp(sc, bf, 0);
4143}
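
/*
 * Illustrative sketch of the cleanup gate used above: the TID is
 * paused once when cleanup begins, incomp counts frames still owned
 * by the hardware, and the pause is dropped exactly once, by
 * whichever completion drives incomp to zero:
 *
 *	(at cleanup start, under the TX lock)
 *	atid->incomp = frames_still_on_hw;	(name is illustrative)
 *	atid->cleanup_inprogress = 1;
 *
 *	(per completion, under the TX lock)
 *	if (--atid->incomp == 0) {
 *		atid->cleanup_inprogress = 0;
 *		ath_tx_tid_resume(sc, atid);
 *	}
 */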
4144
4145/*
4146 * Performs transmit side cleanup when TID changes from aggregated to
4147 * unaggregated.
4148 *
4149 * - Discard all retry frames from the s/w queue.
4150 * - Fix the tx completion function for all buffers in s/w queue.
4151 * - Count the number of unacked frames, and let transmit completion
4152 *   handle it later.
4153 *
4154 * The caller is responsible for pausing the TID and unpausing the
4155 * TID if no cleanup was required. Otherwise the cleanup path will
4156 * unpause the TID once the last hardware queued frame is completed.
4157 */
4158static void
4159ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4160    ath_bufhead *bf_cq)
4161{
4162	struct ath_tid *atid = &an->an_tid[tid];
4163	struct ieee80211_tx_ampdu *tap;
4164	struct ath_buf *bf, *bf_next;
4165
4166	ATH_TX_LOCK_ASSERT(sc);
4167
4168	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4169	    "%s: TID %d: called\n", __func__, tid);
4170
4171	/*
4172	 * Move the filtered frames to the TX queue, before
4173	 * we run off and discard/process things.
4174	 */
4175	/* XXX this is really quite inefficient */
4176	while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4177		ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4178		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4179	}
4180
4181	/*
4182	 * Update the frames in the software TX queue:
4183	 *
4184	 * + Discard retry frames in the queue
4185	 * + Fix the completion function to be non-aggregate
4186	 */
4187	bf = ATH_TID_FIRST(atid);
4188	while (bf) {
4189		if (bf->bf_state.bfs_isretried) {
4190			bf_next = TAILQ_NEXT(bf, bf_list);
4191			ATH_TID_REMOVE(atid, bf, bf_list);
4192			if (bf->bf_state.bfs_dobaw) {
4193				ath_tx_update_baw(sc, an, atid, bf);
4194				if (!bf->bf_state.bfs_addedbaw)
4195					DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4196					    "%s: wasn't added: seqno %d\n",
4197					    __func__,
4198					    SEQNO(bf->bf_state.bfs_seqno));
4199			}
4200			bf->bf_state.bfs_dobaw = 0;
4201			/*
4202			 * Call the default completion handler with "fail" just
4203			 * so upper levels are suitably notified about this.
4204			 */
4205			TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4206			bf = bf_next;
4207			continue;
4208		}
4209		/* Give these the default completion handler */
4210		bf->bf_comp = ath_tx_normal_comp;
4211		bf = TAILQ_NEXT(bf, bf_list);
4212	}
4213
4214	/*
4215	 * Calculate what hardware-queued frames exist based
4216	 * on the current BAW size. Ie, what frames have been
4217	 * added to the TX hardware queue for this TID but
4218	 * not yet ACKed.
4219	 */
4220	tap = ath_tx_get_tx_tid(an, tid);
4221	/* Need the lock - fiddling with BAW */
4222	while (atid->baw_head != atid->baw_tail) {
4223		if (atid->tx_buf[atid->baw_head]) {
4224			atid->incomp++;
4225			atid->cleanup_inprogress = 1;
4226			atid->tx_buf[atid->baw_head] = NULL;
4227		}
4228		INCR(atid->baw_head, ATH_TID_MAX_BUFS);
4229		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
4230	}
4231
4232	if (atid->cleanup_inprogress)
4233		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4234		    "%s: TID %d: cleanup needed: %d packets\n",
4235		    __func__, tid, atid->incomp);
4236
4237	/* Owner now must free completed frames */
4238}
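
/*
 * Illustrative sketch of the ring walk above: tx_buf[] is treated
 * as a ring indexed modulo ATH_TID_MAX_BUFS, with baw_head..baw_tail
 * as the occupied span; INCR() is a modular increment.  Advancing
 * the head and the sequence space in lock-step looks like:
 *
 *	while (head != tail) {
 *		if (tx_buf[head] != NULL)
 *			(frame still owned by hardware; count it)
 *		head = (head + 1) % ATH_TID_MAX_BUFS;
 *		seq = (seq + 1) % IEEE80211_SEQ_RANGE;
 *	}
 */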
4239
4240static struct ath_buf *
4241ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4242    struct ath_tid *tid, struct ath_buf *bf)
4243{
4244	struct ath_buf *nbf;
4245	int error;
4246
4247	/*
4248	 * Clone the buffer.  This will handle the dma unmap and
4249	 * copy the node reference to the new buffer.  If this
4250	 * works out, 'bf' will have no DMA mapping, no mbuf
4251	 * pointer and no node reference.
4252	 */
4253	nbf = ath_buf_clone(sc, bf);
4254
4255#if 0
4256	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4257	    __func__);
4258#endif
4259
4260	if (nbf == NULL) {
4261		/* Failed to clone */
4262		DPRINTF(sc, ATH_DEBUG_XMIT,
4263		    "%s: failed to clone a busy buffer\n",
4264		    __func__);
4265		return NULL;
4266	}
4267
4268	/* Setup the dma for the new buffer */
4269	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4270	if (error != 0) {
4271		DPRINTF(sc, ATH_DEBUG_XMIT,
4272		    "%s: failed to setup dma for clone\n",
4273		    __func__);
4274		/*
4275		 * Put this at the head of the list, not tail;
4276		 * that way it doesn't interfere with the
4277		 * busy buffer logic (which uses the tail of
4278		 * the list.)
4279		 */
4280		ATH_TXBUF_LOCK(sc);
4281		ath_returnbuf_head(sc, nbf);
4282		ATH_TXBUF_UNLOCK(sc);
4283		return NULL;
4284	}
4285
4286	/* Update BAW if required, before we free the original buf */
4287	if (bf->bf_state.bfs_dobaw)
4288		ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4289
4290	/* Free original buffer; return new buffer */
4291	ath_freebuf(sc, bf);
4292
4293	return nbf;
4294}
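
/*
 * Illustrative summary of the clone dance above: a buffer still
 * marked ATH_BUF_BUSY may yet be referenced by the hardware, so it
 * can't be re-submitted directly.  Instead the mbuf and node
 * reference migrate to a fresh ath_buf:
 *
 *	nbf = ath_buf_clone(sc, bf);		(steals mbuf + node ref)
 *	ath_tx_dmasetup(sc, nbf, nbf->bf_m);	(fresh DMA mapping)
 *	ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);  (if tracked in BAW)
 *	ath_freebuf(sc, bf);			(old shell returns to pool)
 */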
4295
4296/*
4297 * Handle retrying an unaggregate frame in an aggregate
4298 * session.
4299 *
4300 * If too many retries occur, pause the TID, wait for
4301 * any further retransmits (as there's no reason why
4302 * non-aggregate frames in an aggregate session are
4303 * transmitted in-order; they just have to be in-BAW)
4304 * and then queue a BAR.
4305 */
4306static void
4307ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4308{
4309	struct ieee80211_node *ni = bf->bf_node;
4310	struct ath_node *an = ATH_NODE(ni);
4311	int tid = bf->bf_state.bfs_tid;
4312	struct ath_tid *atid = &an->an_tid[tid];
4313	struct ieee80211_tx_ampdu *tap;
4314
4315	ATH_TX_LOCK(sc);
4316
4317	tap = ath_tx_get_tx_tid(an, tid);
4318
4319	/*
4320	 * If the buffer is marked as busy, we can't directly
4321	 * reuse it. Instead, try to clone the buffer.
4322	 * If the clone is successful, recycle the old buffer.
4323	 * If the clone is unsuccessful, set bfs_retries to max
4324	 * to force the next bit of code to free the buffer
4325	 * for us.
4326	 */
4327	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4328	    (bf->bf_flags & ATH_BUF_BUSY)) {
4329		struct ath_buf *nbf;
4330		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4331		if (nbf)
4332			/* bf has been freed at this point */
4333			bf = nbf;
4334		else
4335			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4336	}
4337
4338	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4339		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4340		    "%s: exceeded retries; seqno %d\n",
4341		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4342		sc->sc_stats.ast_tx_swretrymax++;
4343
4344		/* Update BAW anyway */
4345		if (bf->bf_state.bfs_dobaw) {
4346			ath_tx_update_baw(sc, an, atid, bf);
4347			if (! bf->bf_state.bfs_addedbaw)
4348				DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4349				    "%s: wasn't added: seqno %d\n",
4350				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4351		}
4352		bf->bf_state.bfs_dobaw = 0;
4353
4354		/* Suspend the TX queue and get ready to send the BAR */
4355		ath_tx_tid_bar_suspend(sc, atid);
4356
4357		/* Send the BAR if there are no other frames waiting */
4358		if (ath_tx_tid_bar_tx_ready(sc, atid))
4359			ath_tx_tid_bar_tx(sc, atid);
4360
4361		ATH_TX_UNLOCK(sc);
4362
4363		/* Free buffer, bf is free after this call */
4364		ath_tx_default_comp(sc, bf, 0);
4365		return;
4366	}
4367
4368	/*
4369	 * This increments the retry counter as well as
4370	 * sets the retry flag in the ath_buf and packet
4371	 * body.
4372	 */
4373	ath_tx_set_retry(sc, bf);
4374	sc->sc_stats.ast_tx_swretries++;
4375
4376	/*
4377	 * Insert this at the head of the queue, so it's
4378	 * retried before any current/subsequent frames.
4379	 */
4380	ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4381	ath_tx_tid_sched(sc, atid);
4382	/* Send the BAR if there are no other frames waiting */
4383	if (ath_tx_tid_bar_tx_ready(sc, atid))
4384		ath_tx_tid_bar_tx(sc, atid);
4385
4386	ATH_TX_UNLOCK(sc);
4387}
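
/*
 * A minimal sketch (assuming net80211's frame layout) of what
 * ath_tx_set_retry() amounts to, per the comment above: a software
 * retransmit marks both the driver state and the Retry bit in the
 * 802.11 frame control field of the frame itself:
 *
 *	struct ieee80211_frame *wh;
 *
 *	wh = mtod(bf->bf_m, struct ieee80211_frame *);
 *	bf->bf_state.bfs_isretried = 1;
 *	bf->bf_state.bfs_retries++;
 *	wh->i_fc[1] |= IEEE80211_FC1_RETRY;
 */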
4388
4389/*
4390 * Common code for aggregate excessive retry/subframe retry.
4391 * If retrying, queues buffers to bf_q. If not, frees the
4392 * buffers.
4393 *
4394 * XXX should unify this with ath_tx_aggr_retry_unaggr()
4395 */
4396static int
4397ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4398    ath_bufhead *bf_q)
4399{
4400	struct ieee80211_node *ni = bf->bf_node;
4401	struct ath_node *an = ATH_NODE(ni);
4402	int tid = bf->bf_state.bfs_tid;
4403	struct ath_tid *atid = &an->an_tid[tid];
4404
4405	ATH_TX_LOCK_ASSERT(sc);
4406
4407	/* XXX clr11naggr should be done for all subframes */
4408	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4409	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4410
4411	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4412
4413	/*
4414	 * If the buffer is marked as busy, we can't directly
4415	 * reuse it. Instead, try to clone the buffer.
4416	 * If the clone is successful, recycle the old buffer.
4417	 * If the clone is unsuccessful, set bfs_retries to max
4418	 * to force the next bit of code to free the buffer
4419	 * for us.
4420	 */
4421	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4422	    (bf->bf_flags & ATH_BUF_BUSY)) {
4423		struct ath_buf *nbf;
4424		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4425		if (nbf)
4426			/* bf has been freed at this point */
4427			bf = nbf;
4428		else
4429			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4430	}
4431
4432	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4433		sc->sc_stats.ast_tx_swretrymax++;
4434		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4435		    "%s: max retries: seqno %d\n",
4436		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4437		ath_tx_update_baw(sc, an, atid, bf);
4438		if (!bf->bf_state.bfs_addedbaw)
4439			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4440			    "%s: wasn't added: seqno %d\n",
4441			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4442		bf->bf_state.bfs_dobaw = 0;
4443		return 1;
4444	}
4445
4446	ath_tx_set_retry(sc, bf);
4447	sc->sc_stats.ast_tx_swretries++;
4448	bf->bf_next = NULL;		/* Just to make sure */
4449
4450	/* Clear the aggregate state */
4451	bf->bf_state.bfs_aggr = 0;
4452	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
4453	bf->bf_state.bfs_nframes = 1;
4454
4455	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4456	return 0;
4457}
4458
4459/*
4460 * error pkt completion for an aggregate destination
4461 */
4462static void
4463ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4464    struct ath_tid *tid)
4465{
4466	struct ieee80211_node *ni = bf_first->bf_node;
4467	struct ath_node *an = ATH_NODE(ni);
4468	struct ath_buf *bf_next, *bf;
4469	ath_bufhead bf_q;
4470	int drops = 0;
4471	struct ieee80211_tx_ampdu *tap;
4472	ath_bufhead bf_cq;
4473
4474	TAILQ_INIT(&bf_q);
4475	TAILQ_INIT(&bf_cq);
4476
4477	/*
4478	 * Update rate control - all frames have failed.
4479	 *
4480	 * XXX use the length in the first frame in the series;
4481	 * XXX just so things are consistent for now.
4482	 */
4483	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4484	    &bf_first->bf_status.ds_txstat,
4485	    bf_first->bf_state.bfs_pktlen,
4486	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4487
4488	ATH_TX_LOCK(sc);
4489	tap = ath_tx_get_tx_tid(an, tid->tid);
4490	sc->sc_stats.ast_tx_aggr_failall++;
4491
4492	/* Retry all subframes */
4493	bf = bf_first;
4494	while (bf) {
4495		bf_next = bf->bf_next;
4496		bf->bf_next = NULL;	/* Remove it from the aggr list */
4497		sc->sc_stats.ast_tx_aggr_fail++;
4498		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4499			drops++;
4500			bf->bf_next = NULL;
4501			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4502		}
4503		bf = bf_next;
4504	}
4505
4506	/* Prepend all frames to the beginning of the queue */
4507	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4508		TAILQ_REMOVE(&bf_q, bf, bf_list);
4509		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4510	}
4511
4512	/*
4513	 * Schedule the TID to be re-tried.
4514	 */
4515	ath_tx_tid_sched(sc, tid);
4516
4517	/*
4518	 * send bar if we dropped any frames
4519	 *
4520	 * Keep the txq lock held for now, as we need to ensure
4521	 * that ni_txseqs[] is consistent (as it's being updated
4522	 * in the ifnet TX context or raw TX context.)
4523	 */
4524	if (drops) {
4525		/* Suspend the TX queue and get ready to send the BAR */
4526		ath_tx_tid_bar_suspend(sc, tid);
4527	}
4528
4529	/*
4530	 * Send BAR if required
4531	 */
4532	if (ath_tx_tid_bar_tx_ready(sc, tid))
4533		ath_tx_tid_bar_tx(sc, tid);
4534
4535	ATH_TX_UNLOCK(sc);
4536
4537	/* Complete frames which errored out */
4538	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4539		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4540		ath_tx_default_comp(sc, bf, 0);
4541	}
4542}
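
/*
 * Illustrative note on the prepend loops used here: inserting at
 * the head reverses order, so the retry queue is drained from its
 * tail; the two reversals cancel and bf_q's original order is
 * preserved at the front of the TID queue:
 *
 *	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
 *		TAILQ_REMOVE(&bf_q, bf, bf_list);
 *		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
 *	}
 */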
4543
4544/*
4545 * Handle clean-up of packets from an aggregate list.
4546 *
4547 * There's no need to update the BAW here - the session is being
4548 * torn down.
4549 */
4550static void
4551ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4552{
4553	struct ath_buf *bf, *bf_next;
4554	struct ieee80211_node *ni = bf_first->bf_node;
4555	struct ath_node *an = ATH_NODE(ni);
4556	int tid = bf_first->bf_state.bfs_tid;
4557	struct ath_tid *atid = &an->an_tid[tid];
4558
4559	ATH_TX_LOCK(sc);
4560
4561	/* update incomp */
4562	bf = bf_first;
4563	while (bf) {
4564		atid->incomp--;
4565		bf = bf->bf_next;
4566	}
4567
4568	if (atid->incomp == 0) {
4569		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4570		    "%s: TID %d: cleaned up! resume!\n",
4571		    __func__, tid);
4572		atid->cleanup_inprogress = 0;
4573		ath_tx_tid_resume(sc, atid);
4574	}
4575
4576	/* Send BAR if required */
4577	/* XXX why would we send a BAR when transitioning to non-aggregation? */
4578	/*
4579	 * XXX TODO: we should likely just tear down the BAR state here,
4580	 * rather than sending a BAR.
4581	 */
4582	if (ath_tx_tid_bar_tx_ready(sc, atid))
4583		ath_tx_tid_bar_tx(sc, atid);
4584
4585	ATH_TX_UNLOCK(sc);
4586
4587	/* Handle frame completion */
4588	bf = bf_first;
4589	while (bf) {
4590		bf_next = bf->bf_next;
4591		ath_tx_default_comp(sc, bf, 1);
4592		bf = bf_next;
4593	}
4594}
4595
4596/*
4597 * Handle completion of a set of aggregate frames.
4598 *
4599 * Note: the completion status is reported on the last descriptor in
4600 * the aggregate, not on the last descriptor of the first frame.
4601 */
4602static void
4603ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4604    int fail)
4605{
4606	//struct ath_desc *ds = bf->bf_lastds;
4607	struct ieee80211_node *ni = bf_first->bf_node;
4608	struct ath_node *an = ATH_NODE(ni);
4609	int tid = bf_first->bf_state.bfs_tid;
4610	struct ath_tid *atid = &an->an_tid[tid];
4611	struct ath_tx_status ts;
4612	struct ieee80211_tx_ampdu *tap;
4613	ath_bufhead bf_q;
4614	ath_bufhead bf_cq;
4615	int seq_st, tx_ok;
4616	int hasba, isaggr;
4617	uint32_t ba[2];
4618	struct ath_buf *bf, *bf_next;
4619	int ba_index;
4620	int drops = 0;
4621	int nframes = 0, nbad = 0, nf;
4622	int pktlen;
4623	/* XXX there's too much on the stack? */
4624	struct ath_rc_series rc[ATH_RC_NUM];
4625	int txseq;
4626
4627	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4628	    __func__, atid->hwq_depth);
4629
4630	/*
4631	 * Take a copy; this may be needed -after- bf_first
4632	 * has been completed and freed.
4633	 */
4634	ts = bf_first->bf_status.ds_txstat;
4635
4636	TAILQ_INIT(&bf_q);
4637	TAILQ_INIT(&bf_cq);
4638
4639	/* The TID state is kept behind the TXQ lock */
4640	ATH_TX_LOCK(sc);
4641
4642	atid->hwq_depth--;
4643	if (atid->hwq_depth < 0)
4644		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4645		    __func__, atid->hwq_depth);
4646
4647	/*
4648	 * If the TID is filtered, handle completing the filter
4649	 * transition before potentially kicking it to the cleanup
4650	 * function.
4651	 *
4652	 * XXX this is duplicate work, ew.
4653	 */
4654	if (atid->isfiltered)
4655		ath_tx_tid_filt_comp_complete(sc, atid);
4656
4657	/*
4658	 * Punt cleanup to the relevant function, not our problem now
4659	 */
4660	if (atid->cleanup_inprogress) {
4661		if (atid->isfiltered)
4662			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4663			    "%s: isfiltered=1, normal_comp?\n",
4664			    __func__);
4665		ATH_TX_UNLOCK(sc);
4666		ath_tx_comp_cleanup_aggr(sc, bf_first);
4667		return;
4668	}
4669
4670	/*
4671	 * If the frame is filtered, transition to filtered frame
4672	 * mode and add this to the filtered frame list.
4673	 *
4674	 * XXX TODO: figure out how this interoperates with
4675	 * BAR, pause and cleanup states.
4676	 */
4677	if ((ts.ts_status & HAL_TXERR_FILT) ||
4678	    (ts.ts_status != 0 && atid->isfiltered)) {
4679		if (fail != 0)
4680			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4681			    "%s: isfiltered=1, fail=%d\n", __func__, fail);
4682		ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4683
4684		/* Remove from BAW */
4685		TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4686			if (bf->bf_state.bfs_addedbaw)
4687				drops++;
4688			if (bf->bf_state.bfs_dobaw) {
4689				ath_tx_update_baw(sc, an, atid, bf);
4690				if (!bf->bf_state.bfs_addedbaw)
4691					DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4692					    "%s: wasn't added: seqno %d\n",
4693					    __func__,
4694					    SEQNO(bf->bf_state.bfs_seqno));
4695			}
4696			bf->bf_state.bfs_dobaw = 0;
4697		}
4698		/*
4699		 * If any intermediate frames in the BAW were dropped when
4700		 * handling filtering things, send a BAR.
4701		 */
4702		if (drops)
4703			ath_tx_tid_bar_suspend(sc, atid);
4704
4705		/*
4706		 * Finish up by sending a BAR if required and freeing
4707		 * the frames outside of the TX lock.
4708		 */
4709		goto finish_send_bar;
4710	}
4711
4712	/*
4713	 * XXX for now, use the first frame in the aggregate for
4714	 * XXX rate control completion; it's at least consistent.
4715	 */
4716	pktlen = bf_first->bf_state.bfs_pktlen;
4717
4718	/*
4719	 * Handle errors first!
4720	 *
4721	 * Here, handle _any_ error as an "exceeded retries" error.
4722	 * Later on (when filtered frames are to be specially handled)
4723	 * it'll have to be expanded.
4724	 */
4725#if 0
4726	if (ts.ts_status & HAL_TXERR_XRETRY) {
4727#endif
4728	if (ts.ts_status != 0) {
4729		ATH_TX_UNLOCK(sc);
4730		ath_tx_comp_aggr_error(sc, bf_first, atid);
4731		return;
4732	}
4733
4734	tap = ath_tx_get_tx_tid(an, tid);
4735
4736	/*
4737	 * extract starting sequence and block-ack bitmap
4738	 */
4739	/* XXX endian-ness of seq_st, ba? */
4740	seq_st = ts.ts_seqnum;
4741	hasba = !! (ts.ts_flags & HAL_TX_BA);
4742	tx_ok = (ts.ts_status == 0);
4743	isaggr = bf_first->bf_state.bfs_aggr;
4744	ba[0] = ts.ts_ba_low;
4745	ba[1] = ts.ts_ba_high;
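
	/*
	 * Worked example (illustrative): if seq_st is 100 and a subframe
	 * carries seqno 103, its offset into the 64-bit block-ack bitmap
	 * (ba[0] = low 32 bits, ba[1] = high 32 bits) is
	 * ATH_BA_INDEX(100, 103) = 3, so ATH_BA_ISSET(ba, 3) tests bit 3
	 * of ba[0]; offsets wrap modulo the 4096-entry sequence space.
	 */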
4746
4747	/*
4748	 * Copy the TX completion status and the rate control
4749	 * series from the first descriptor, as it may be freed
4750	 * before the rate control code can get its grubby fingers
4751	 * into things.
4752	 */
4753	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4754
4755	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4756	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4757	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4758	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4759	    isaggr, seq_st, hasba, ba[0], ba[1]);
4760
4761	/*
4762	 * The reference driver doesn't do this; it simply ignores
4763	 * this check in its entirety.
4764	 *
4765	 * I've seen this occur when using iperf to send traffic
4766	 * out tid 1 - the aggregate frames are all marked as TID 1,
4767	 * but the TXSTATUS has TID=0.  So, let's just ignore this
4768	 * check.
4769	 */
4770#if 0
4771	/* Occasionally, the MAC sends a tx status for the wrong TID. */
4772	if (tid != ts.ts_tid) {
4773		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4774		    __func__, tid, ts.ts_tid);
4775		tx_ok = 0;
4776	}
4777#endif
4778
4779	/* AR5416 BA bug; this requires an interface reset */
4780	if (isaggr && tx_ok && (! hasba)) {
4781		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4782		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
4783		    "seq_st=%d\n",
4784		    __func__, hasba, tx_ok, isaggr, seq_st);
4785		/* XXX TODO: schedule an interface reset */
4786#ifdef ATH_DEBUG
4787		ath_printtxbuf(sc, bf_first,
4788		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
4789#endif
4790	}
4791
4792	/*
4793	 * Walk the list of frames, figure out which ones were correctly
4794	 * sent and which weren't.
4795	 */
4796	bf = bf_first;
4797	nf = bf_first->bf_state.bfs_nframes;
4798
4799	/* bf_first is going to be invalid once this list is walked */
4800	bf_first = NULL;
4801
4802	/*
4803	 * Walk the list of completed frames and determine
4804	 * which need to be completed and which need to be
4805	 * retransmitted.
4806	 *
4807	 * For completed frames, the completion functions need
4808	 * to be called at the end of this function as the last
4809	 * node reference may free the node.
4810	 *
4811	 * Finally, since the TXQ lock can't be held during the
4812	 * completion callback (to avoid lock recursion),
4813	 * the completion calls have to be done outside of the
4814	 * lock.
4815	 */
4816	while (bf) {
4817		nframes++;
4818		ba_index = ATH_BA_INDEX(seq_st,
4819		    SEQNO(bf->bf_state.bfs_seqno));
4820		bf_next = bf->bf_next;
4821		bf->bf_next = NULL;	/* Remove it from the aggr list */
4822
4823		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4824		    "%s: checking bf=%p seqno=%d; ack=%d\n",
4825		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
4826		    ATH_BA_ISSET(ba, ba_index));
4827
4828		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
4829			sc->sc_stats.ast_tx_aggr_ok++;
4830			ath_tx_update_baw(sc, an, atid, bf);
4831			bf->bf_state.bfs_dobaw = 0;
4832			if (!bf->bf_state.bfs_addedbaw)
4833				DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4834				    "%s: wasn't added: seqno %d\n",
4835				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4836			bf->bf_next = NULL;
4837			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4838		} else {
4839			sc->sc_stats.ast_tx_aggr_fail++;
4840			if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4841				drops++;
4842				bf->bf_next = NULL;
4843				TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4844			}
4845			nbad++;
4846		}
4847		bf = bf_next;
4848	}
4849
4850	/*
4851	 * Now that the BAW updates have been done, unlock
4852	 *
4853	 * txseq is grabbed before the lock is released so we
4854	 * have a consistent view of what -was- in the BAW.
4855	 * Anything after this point will not yet have been
4856	 * TXed.
4857	 */
4858	txseq = tap->txa_start;
4859	ATH_TX_UNLOCK(sc);
4860
4861	if (nframes != nf)
4862		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4863		    "%s: num frames seen=%d; bf nframes=%d\n",
4864		    __func__, nframes, nf);
4865
4866	/*
4867	 * Now we know how many frames were bad, call the rate
4868	 * control code.
4869	 */
4870	if (fail == 0)
4871		ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
4872		    nbad);
4873
4874	/*
4875	 * send bar if we dropped any frames
4876	 */
4877	if (drops) {
4878		/* Suspend the TX queue and get ready to send the BAR */
4879		ATH_TX_LOCK(sc);
4880		ath_tx_tid_bar_suspend(sc, atid);
4881		ATH_TX_UNLOCK(sc);
4882	}
4883
4884	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4885	    "%s: txa_start now %d\n", __func__, tap->txa_start);
4886
4887	ATH_TX_LOCK(sc);
4888
4889	/* Prepend all frames to the beginning of the queue */
4890	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4891		TAILQ_REMOVE(&bf_q, bf, bf_list);
4892		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4893	}
4894
4895	/*
4896	 * Reschedule to grab some further frames.
4897	 */
4898	ath_tx_tid_sched(sc, atid);
4899
4900	/*
4901	 * If the queue is filtered, re-schedule as required.
4902	 *
4903	 * This is required as there may be a subsequent TX descriptor
4904	 * for this end-node that has CLRDMASK set, so it's quite possible
4905	 * that a filtered frame will be followed by a non-filtered
4906	 * (complete or otherwise) frame.
4907	 *
4908	 * XXX should we do this before we complete the frame?
4909	 */
4910	if (atid->isfiltered)
4911		ath_tx_tid_filt_comp_complete(sc, atid);
4912
4913finish_send_bar:
4914
4915	/*
4916	 * Send BAR if required
4917	 */
4918	if (ath_tx_tid_bar_tx_ready(sc, atid))
4919		ath_tx_tid_bar_tx(sc, atid);
4920
4921	ATH_TX_UNLOCK(sc);
4922
4923	/* Do deferred completion */
4924	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4925		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4926		ath_tx_default_comp(sc, bf, 0);
4927	}
4928}
4929
4930/*
4931 * Handle completion of unaggregated frames in an ADDBA
4932 * session.
4933 *
4934 * Fail is set to 1 if the entry is being freed via a call to
4935 * ath_tx_draintxq().
4936 */
4937static void
4938ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
4939{
4940	struct ieee80211_node *ni = bf->bf_node;
4941	struct ath_node *an = ATH_NODE(ni);
4942	int tid = bf->bf_state.bfs_tid;
4943	struct ath_tid *atid = &an->an_tid[tid];
4944	struct ath_tx_status ts;
4945	int drops = 0;
4946
4947	/*
4948	 * Take a copy of this; filtering/cloning the frame may free the
4949	 * bf pointer.
4950	 */
4951	ts = bf->bf_status.ds_txstat;
4952
4953	/*
4954	 * Update rate control status here, before we possibly
4955	 * punt to retry or cleanup.
4956	 *
4957	 * Do it outside of the TXQ lock.
4958	 */
4959	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4960		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4961		    &bf->bf_status.ds_txstat,
4962		    bf->bf_state.bfs_pktlen,
4963		    1, (ts.ts_status == 0) ? 0 : 1);
4964
4965	/*
4966	 * This is called early so atid->hwq_depth can be tracked.
4967	 * This unfortunately means that it's released and re-acquired
4968	 * during retry and cleanup. That's rather inefficient.
4969	 */
4970	ATH_TX_LOCK(sc);
4971
4972	if (tid == IEEE80211_NONQOS_TID)
4973		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
4974
4975	DPRINTF(sc, ATH_DEBUG_SW_TX,
4976	    "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
4977	    __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
4978	    SEQNO(bf->bf_state.bfs_seqno));
4979
4980	atid->hwq_depth--;
4981	if (atid->hwq_depth < 0)
4982		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4983		    __func__, atid->hwq_depth);
4984
4985	/*
4986	 * If the TID is filtered, handle completing the filter
4987	 * transition before potentially kicking it to the cleanup
4988	 * function.
4989	 */
4990	if (atid->isfiltered)
4991		ath_tx_tid_filt_comp_complete(sc, atid);
4992
4993	/*
4994	 * If a cleanup is in progress, punt to comp_cleanup
4995	 * rather than handling it here.  That path is then
4996	 * responsible for the clean-up, calling the completion
4997	 * function in net80211, etc.
4998	 */
4999	if (atid->cleanup_inprogress) {
5000		if (atid->isfiltered)
5001			DPRINTF(sc, ATH_DEBUG_SW_TX,
5002			    "%s: isfiltered=1, normal_comp?\n",
5003			    __func__);
5004		ATH_TX_UNLOCK(sc);
5005		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5006		    __func__);
5007		ath_tx_comp_cleanup_unaggr(sc, bf);
5008		return;
5009	}
5010
5011	/*
5012	 * XXX TODO: how does cleanup, BAR and filtered frame handling
5013	 * overlap?
5014	 *
5015	 * If the frame is filtered OR if it's any failure but
5016	 * the TID is filtered, the frame must be added to the
5017	 * filtered frame list.
5018	 *
5019	 * However - a busy buffer can't be added to the filtered
5020	 * list as it will end up being recycled without having
5021	 * been made available for the hardware.
5022	 */
5023	if ((ts.ts_status & HAL_TXERR_FILT) ||
5024	    (ts.ts_status != 0 && atid->isfiltered)) {
5025		int freeframe;
5026
5027		if (fail != 0)
5028			DPRINTF(sc, ATH_DEBUG_SW_TX,
5029			    "%s: isfiltered=1, fail=%d\n",
5030			    __func__, fail);
5031		freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5032		if (freeframe) {
5033			/* Remove from BAW */
5034			if (bf->bf_state.bfs_addedbaw)
5035				drops++;
5036			if (bf->bf_state.bfs_dobaw) {
5037				ath_tx_update_baw(sc, an, atid, bf);
5038				if (!bf->bf_state.bfs_addedbaw)
5039					DPRINTF(sc, ATH_DEBUG_SW_TX,
5040					    "%s: wasn't added: seqno %d\n",
5041					    __func__, SEQNO(bf->bf_state.bfs_seqno));
5042			}
5043			bf->bf_state.bfs_dobaw = 0;
5044		}
5045
5046		/*
5047		 * If the frame couldn't be filtered, treat it as a drop and
5048		 * prepare to send a BAR.
5049		 */
5050		if (freeframe && drops)
5051			ath_tx_tid_bar_suspend(sc, atid);
5052
5053		/*
5054		 * Send BAR if required
5055		 */
5056		if (ath_tx_tid_bar_tx_ready(sc, atid))
5057			ath_tx_tid_bar_tx(sc, atid);
5058
5059		ATH_TX_UNLOCK(sc);
5060		/*
5061		 * If freeframe is set, then the frame couldn't be
5062		 * cloned and bf is still valid.  Just complete/free it.
5063		 */
5064		if (freeframe)
5065			ath_tx_default_comp(sc, bf, fail);
5066
5068		return;
5069	}
5070	/*
5071	 * Don't bother with the retry check if all frames
5072	 * are being failed (eg during queue deletion.)
5073	 */
5074#if 0
5075	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5076#endif
5077	if (fail == 0 && ts.ts_status != 0) {
5078		ATH_TX_UNLOCK(sc);
5079		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5080		    __func__);
5081		ath_tx_aggr_retry_unaggr(sc, bf);
5082		return;
5083	}
5084
5085	/* Success? Complete */
5086	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5087	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5088	if (bf->bf_state.bfs_dobaw) {
5089		ath_tx_update_baw(sc, an, atid, bf);
5090		bf->bf_state.bfs_dobaw = 0;
5091		if (!bf->bf_state.bfs_addedbaw)
5092			DPRINTF(sc, ATH_DEBUG_SW_TX,
5093			    "%s: wasn't added: seqno %d\n",
5094			    __func__, SEQNO(bf->bf_state.bfs_seqno));
5095	}
5096
5097	/*
5098	 * If the queue is filtered, re-schedule as required.
5099	 *
5100	 * This is required as there may be a subsequent TX descriptor
5101	 * for this end-node that has CLRDMASK set, so it's quite possible
5102	 * that a filtered frame will be followed by a non-filtered
5103	 * (complete or otherwise) frame.
5104	 *
5105	 * XXX should we do this before we complete the frame?
5106	 */
5107	if (atid->isfiltered)
5108		ath_tx_tid_filt_comp_complete(sc, atid);
5109
5110	/*
5111	 * Send BAR if required
5112	 */
5113	if (ath_tx_tid_bar_tx_ready(sc, atid))
5114		ath_tx_tid_bar_tx(sc, atid);
5115
5116	ATH_TX_UNLOCK(sc);
5117
5118	ath_tx_default_comp(sc, bf, fail);
5119	/* bf is freed at this point */
5120}
5121
5122void
5123ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5124{
5125	if (bf->bf_state.bfs_aggr)
5126		ath_tx_aggr_comp_aggr(sc, bf, fail);
5127	else
5128		ath_tx_aggr_comp_unaggr(sc, bf, fail);
5129}
5130
5131/*
5132 * Schedule some packets from the given node/TID to the hardware.
5133 *
5134 * This is the aggregate version.
5135 */
5136void
5137ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5138    struct ath_tid *tid)
5139{
5140	struct ath_buf *bf;
5141	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5142	struct ieee80211_tx_ampdu *tap;
5143	ATH_AGGR_STATUS status;
5144	ath_bufhead bf_q;
5145
5146	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5147	ATH_TX_LOCK_ASSERT(sc);
5148
5149	/*
5150	 * XXX TODO: If we're called for a queue that we're leaking frames to,
5151	 * ensure we only leak one.
5152	 */
5153
5154	tap = ath_tx_get_tx_tid(an, tid->tid);
5155
5156	if (tid->tid == IEEE80211_NONQOS_TID)
5157		DPRINTF(sc, ATH_DEBUG_SW_TX,
5158		    "%s: called for TID=NONQOS_TID?\n", __func__);
5159
5160	for (;;) {
5161		status = ATH_AGGR_DONE;
5162
5163		/*
5164		 * If the upper layer has paused the TID, don't
5165		 * queue any further packets.
5166		 *
5167		 * This can also occur from the completion task because
5168		 * of packet loss; but as it's serialised with this code,
5169		 * it won't "appear" halfway through queuing packets.
5170		 */
5171		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5172			break;
5173
5174		bf = ATH_TID_FIRST(tid);
5175		if (bf == NULL) {
5176			break;
5177		}
5178
5179		/*
5180		 * If the packet doesn't fall within the BAW (eg a NULL
5181		 * data frame), schedule it directly; continue.
5182		 */
5183		if (! bf->bf_state.bfs_dobaw) {
5184			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5185			    "%s: non-baw packet\n",
5186			    __func__);
5187			ATH_TID_REMOVE(tid, bf, bf_list);
5188
5189			if (bf->bf_state.bfs_nframes > 1)
5190				DPRINTF(sc, ATH_DEBUG_SW_TX,
5191				    "%s: aggr=%d, nframes=%d\n",
5192				    __func__,
5193				    bf->bf_state.bfs_aggr,
5194				    bf->bf_state.bfs_nframes);
5195
5196			/*
5197			 * This shouldn't happen - such frames shouldn't
5198			 * ever have been queued as an aggregate in the
5199			 * first place.  However, make sure the fields
5200			 * are correctly set up, just to be totally sure.
5201			 */
5202			bf->bf_state.bfs_aggr = 0;
5203			bf->bf_state.bfs_nframes = 1;
5204
5205			/* Update CLRDMASK just before this frame is queued */
5206			ath_tx_update_clrdmask(sc, tid, bf);
5207
5208			ath_tx_do_ratelookup(sc, bf);
5209			ath_tx_calc_duration(sc, bf);
5210			ath_tx_calc_protection(sc, bf);
5211			ath_tx_set_rtscts(sc, bf);
5212			ath_tx_rate_fill_rcflags(sc, bf);
5213			ath_tx_setds(sc, bf);
5214			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5215
5216			sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5217
5218			/* Queue the packet; continue */
5219			goto queuepkt;
5220		}
5221
5222		TAILQ_INIT(&bf_q);
5223
5224		/*
5225		 * Do a rate control lookup on the first frame in the
5226		 * list. The rate control code needs that to occur
5227		 * before it can determine whether to TX.
5228		 * It's inaccurate because the rate control code doesn't
5229		 * really "do" aggregate lookups, so it only considers
5230		 * the size of the first frame.
5231		 */
5232		ath_tx_do_ratelookup(sc, bf);
5233		bf->bf_state.bfs_rc[3].rix = 0;
5234		bf->bf_state.bfs_rc[3].tries = 0;
5235
5236		ath_tx_calc_duration(sc, bf);
5237		ath_tx_calc_protection(sc, bf);
5238
5239		ath_tx_set_rtscts(sc, bf);
5240		ath_tx_rate_fill_rcflags(sc, bf);
5241
5242		status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5243
5244		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5245		    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5246
5247		/*
5248		 * No frames to be picked up - out of BAW
5249		 */
5250		if (TAILQ_EMPTY(&bf_q))
5251			break;
5252
5253		/*
5254		 * This assumes that the buffers in the ath_bufhead
5255		 * are already linked together via their bf_next pointers.
5256		 */
5257		bf = TAILQ_FIRST(&bf_q);
5258
5259		if (status == ATH_AGGR_8K_LIMITED)
5260			sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5261
5262		/*
5263		 * If it's the only frame, send it as a non-aggregate;
5264		 * assume that ath_tx_form_aggr() has checked
5265		 * whether it's in the BAW and added it appropriately.
5266		 */
5267		if (bf->bf_state.bfs_nframes == 1) {
5268			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5269			    "%s: single-frame aggregate\n", __func__);
5270
5271			/* Update CLRDMASK just before this frame is queued */
5272			ath_tx_update_clrdmask(sc, tid, bf);
5273
5274			bf->bf_state.bfs_aggr = 0;
5275			bf->bf_state.bfs_ndelim = 0;
5276			ath_tx_setds(sc, bf);
5277			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5278			if (status == ATH_AGGR_BAW_CLOSED)
5279				sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5280			else
5281				sc->sc_aggr_stats.aggr_single_pkt++;
5282		} else {
5283			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5284			    "%s: multi-frame aggregate: %d frames, "
5285			    "length %d\n",
5286			     __func__, bf->bf_state.bfs_nframes,
5287			    bf->bf_state.bfs_al);
5288			bf->bf_state.bfs_aggr = 1;
5289			sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5290			sc->sc_aggr_stats.aggr_aggr_pkt++;
5291
5292			/* Update CLRDMASK just before this frame is queued */
5293			ath_tx_update_clrdmask(sc, tid, bf);
5294
5295			/*
5296			 * Calculate the duration/protection as required.
5297			 */
5298			ath_tx_calc_duration(sc, bf);
5299			ath_tx_calc_protection(sc, bf);
5300
5301			/*
5302			 * Update the rate and rtscts information based on the
5303			 * rate decision made by the rate control code;
5304			 * the first frame in the aggregate needs it.
5305			 */
5306			ath_tx_set_rtscts(sc, bf);
5307
5308			/*
5309			 * Setup the relevant descriptor fields
5310			 * for aggregation. The first descriptor
5311			 * already points to the rest in the chain.
5312			 */
5313			ath_tx_setds_11n(sc, bf);
5314
5315		}
5316	queuepkt:
5317		/* Set completion handler, multi-frame aggregate or not */
5318		bf->bf_comp = ath_tx_aggr_comp;
5319
5320		if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5321			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5322
5323		/*
5324		 * Update leak count and frame config if we're leaking frames.
5325		 *
5326		 * XXX TODO: it should update all frames in an aggregate
5327		 * correctly!
5328		 */
5329		ath_tx_leak_count_update(sc, tid, bf);
5330
5331		/* Punt to txq */
5332		ath_tx_handoff(sc, txq, bf);
5333
5334		/* Track outstanding buffer count to hardware */
5335		/* aggregates are "one" buffer */
5336		tid->hwq_depth++;
5337
5338		/*
5339		 * Break out if ath_tx_form_aggr() indicated
5340		 * there can't be any further progress (eg BAW is full.)
5341		 * Checking for an empty txq is done above.
5342		 *
5343		 * XXX locking on txq here?
5344		 */
5345		/* XXX TXQ locking */
5346		if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5347		    (status == ATH_AGGR_BAW_CLOSED ||
5348		     status == ATH_AGGR_LEAK_CLOSED))
5349			break;
5350	}
5351}
5352
5353/*
5354 * Schedule some packets from the given node/TID to the hardware.
5355 *
5356 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5357 * It just dumps frames into the TXQ.  We should limit how deep
5358 * the transmit queue can grow for frames dispatched to the given
5359 * TXQ.
5360 *
5361 * To avoid locking issues, either we need to own the TXQ lock
5362 * at this point, or we need to pass in the maximum frame count
5363 * from the caller.
5364 */
5365void
5366ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5367    struct ath_tid *tid)
5368{
5369	struct ath_buf *bf;
5370	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5371
5372	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5373	    __func__, an, tid->tid);
5374
5375	ATH_TX_LOCK_ASSERT(sc);
5376
5377	/* This is the non-aggregate path; complain if AMPDU is pending or running */
5378	if (ath_tx_ampdu_pending(sc, an, tid->tid))
5379		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5380		    __func__, tid->tid);
5381	if (ath_tx_ampdu_running(sc, an, tid->tid))
5382		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5383		    __func__, tid->tid);
5384
5385	for (;;) {
5386
5387		/*
5388		 * If the upper layers have paused the TID, don't
5389		 * queue any further packets.
5390		 *
5391		 * XXX if we are leaking frames, make sure we decrement
5392		 * that counter _and_ we continue here.
5393		 */
5394		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5395			break;
5396
5397		bf = ATH_TID_FIRST(tid);
5398		if (bf == NULL) {
5399			break;
5400		}
5401
5402		ATH_TID_REMOVE(tid, bf, bf_list);
5403
5404		/* Sanity check! */
5405		if (tid->tid != bf->bf_state.bfs_tid) {
5406			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5407			    " tid %d\n", __func__, bf->bf_state.bfs_tid,
5408			    tid->tid);
5409		}
5410		/* Normal completion handler */
5411		bf->bf_comp = ath_tx_normal_comp;
5412
5413		/*
5414		 * Override this for now, until the non-aggregate
5415		 * completion handler correctly handles software retransmits.
5416		 */
5417		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5418
5419		/* Update CLRDMASK just before this frame is queued */
5420		ath_tx_update_clrdmask(sc, tid, bf);
5421
5422		/* Program descriptors + rate control */
5423		ath_tx_do_ratelookup(sc, bf);
5424		ath_tx_calc_duration(sc, bf);
5425		ath_tx_calc_protection(sc, bf);
5426		ath_tx_set_rtscts(sc, bf);
5427		ath_tx_rate_fill_rcflags(sc, bf);
5428		ath_tx_setds(sc, bf);
5429
5430		/*
5431		 * Update the current leak count if
5432		 * we're leaking frames; and set the
5433		 * MORE flag as appropriate.
5434		 */
5435		ath_tx_leak_count_update(sc, tid, bf);
5436
5437		/* Track outstanding buffer count to hardware */
5438		/* aggregates are "one" buffer */
5439		tid->hwq_depth++;
5440
5441		/* Punt to hardware or software txq */
5442		ath_tx_handoff(sc, txq, bf);
5443	}
5444}
5445
5446/*
5447 * Schedule some packets to the given hardware queue.
5448 *
5449 * This function walks the list of TIDs (ie, ath_node TIDs
5450 * with queued traffic) and attempts to schedule traffic
5451 * from them.
5452 *
5453 * TID scheduling is implemented as a FIFO, with TIDs being
5454 * added to the end of the queue after some frames have been
5455 * scheduled.
5456 */
5457void
5458ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5459{
5460	struct ath_tid *tid, *next, *last;
5461
5462	ATH_TX_LOCK_ASSERT(sc);
5463
5464	/*
5465	 * Don't schedule if the hardware queue is busy.
5466	 * This (hopefully) gives some more time to aggregate
5467	 * some packets in the aggregation queue.
5468	 *
5469	 * XXX It doesn't stop a parallel sender from sneaking
5470	 * in and transmitting a frame!
5471	 */
5472	/* XXX TXQ locking */
5473	if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5474		sc->sc_aggr_stats.aggr_sched_nopkt++;
5475		return;
5476	}
5477	if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5478		sc->sc_aggr_stats.aggr_sched_nopkt++;
5479		return;
5480	}
5481
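	/*
	 * Remember the current tail of the TID list; TIDs rescheduled
	 * during this walk are appended behind it, so stopping at
	 * 'last' guarantees we make exactly one pass.
	 */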
5482	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5483
5484	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5485		/*
5486		 * Suspend paused queues here; they'll be resumed
5487		 * once the addba completes or times out.
5488		 */
5489		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5490		    __func__, tid->tid, tid->paused);
5491		ath_tx_tid_unsched(sc, tid);
5492		/*
5493		 * This node may be in power-save and we're leaking
5494		 * a frame; be careful.
5495		 */
5496		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5497			continue;
5498		}
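		/*
		 * If an A-MPDU session is running on this TID, take the
		 * aggregate path; otherwise queue as normal frames.
		 */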
5499		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5500			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5501		else
5502			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5503
5504		/* Not empty? Re-schedule */
5505		if (tid->axq_depth != 0)
5506			ath_tx_tid_sched(sc, tid);
5507
5508		/*
5509		 * Give the software queue time to aggregate more
5510		 * packets.  If we aren't running aggregation then
5511		 * we should still limit the hardware queue depth.
5512		 */
5513		/* XXX TXQ locking */
5514		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5515			break;
5516		}
5517		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5518			break;
5519		}
5520
5521		/*
5522		 * If this was the last entry on the original list, stop.
5523		 * Otherwise nodes that have been rescheduled onto the end
5524		 * of the TID FIFO list will just keep being rescheduled.
5525		 *
5526		 * XXX What should we do about nodes that were paused
5527		 * but are pending a leaking frame in response to a ps-poll?
5528		 * They'll be put at the front of the list; so they'll
5529		 * prematurely trigger this condition! Ew.
5530		 */
5531		if (tid == last)
5532			break;
5533	}
5534}
5535
5536/*
5537 * TX addba handling
5538 */
5539
5540/*
5541 * Return net80211 TID struct pointer, or NULL for none
5542 */
5543struct ieee80211_tx_ampdu *
5544ath_tx_get_tx_tid(struct ath_node *an, int tid)
5545{
5546	struct ieee80211_node *ni = &an->an_node;
5547	struct ieee80211_tx_ampdu *tap;
5548
5549	if (tid == IEEE80211_NONQOS_TID)
5550		return NULL;
5551
5552	tap = &ni->ni_tx_ampdu[tid];
5553	return tap;
5554}
5555
5556/*
5557 * Is AMPDU-TX running?
5558 */
5559static int
5560ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5561{
5562	struct ieee80211_tx_ampdu *tap;
5563
5564	if (tid == IEEE80211_NONQOS_TID)
5565		return 0;
5566
5567	tap = ath_tx_get_tx_tid(an, tid);
5568	if (tap == NULL)
5569		return 0;	/* Not valid; default to not running */
5570
5571	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5572}
5573
5574/*
5575 * Is AMPDU-TX negotiation pending?
5576 */
5577static int
5578ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5579{
5580	struct ieee80211_tx_ampdu *tap;
5581
5582	if (tid == IEEE80211_NONQOS_TID)
5583		return 0;
5584
5585	tap = ath_tx_get_tx_tid(an, tid);
5586	if (tap == NULL)
5587		return 0;	/* Not valid; default to not pending */
5588
5589	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5590}
5591
5597/*
5598 * Method to handle sending an ADDBA request.
5599 *
5600 * We tap this so the relevant flags can be set to pause the TID
5601 * whilst waiting for the response.
5602 *
5603 * XXX there's no timeout handler we can override?
5604 */
5605int
5606ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5607    int dialogtoken, int baparamset, int batimeout)
5608{
5609	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5610	int tid = tap->txa_tid;
5611	struct ath_node *an = ATH_NODE(ni);
5612	struct ath_tid *atid = &an->an_tid[tid];
5613
5614	/*
5615	 * XXX danger Will Robinson!
5616	 *
5617	 * Although the taskqueue may be running and scheduling some more
5618	 * packets, these should all be _before_ the addba sequence number.
5619	 * However, net80211 will keep self-assigning sequence numbers
5620	 * until addba has been negotiated.
5621	 *
5622	 * In the past, these packets would simply be "paused" (which
5623	 * still works fine, as they're scheduled to the driver in the
5624	 * same serialised path that calls this addba request routine)
5625	 * and, once the aggregation session began, dequeued as aggregate
5626	 * packets and added to the BAW. However, there's now a
5627	 * "bf->bf_state.bfs_dobaw" flag, and it isn't set for these
5628	 * packets. Thus they never get included in the BAW tracking and
5629	 * the initial burst of packets after the addba negotiation can
5630	 * "hang", as they quickly fall outside the BAW.
5631	 *
5632	 * The "eventual" solution should be to tag these packets with
5633	 * dobaw. Although net80211 has given us a sequence number,
5634	 * it'll be "after" the left edge of the BAW and thus it'll
5635	 * fall within it.
5636	 */
5637	ATH_TX_LOCK(sc);
5638	/*
5639	 * This is a bit annoying.  Until net80211 HT code inherits some
5640	 * (any) locking, we may have this called in parallel BUT only
5641	 * one response/timeout will be called.  Grr.
5642	 */
5643	if (atid->addba_tx_pending == 0) {
5644		ath_tx_tid_pause(sc, atid);
5645		atid->addba_tx_pending = 1;
5646	}
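	/*
	 * Note: the pause taken here is undone by ath_tx_tid_resume()
	 * in ath_addba_response() or ath_addba_response_timeout().
	 */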
5647	ATH_TX_UNLOCK(sc);
5648
5649	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5650	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5651	    __func__,
5652	    ni->ni_macaddr,
5653	    ":",
5654	    dialogtoken, baparamset, batimeout);
5655	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5656	    "%s: txa_start=%d, ni_txseqs=%d\n",
5657	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5658
5659	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5660	    batimeout);
5661}
5662
5663/*
5664 * Handle an ADDBA response.
5665 *
5666 * We unpause the queue so TX'ing can resume.
5667 *
5668 * Any packets TX'ed from this point should take the "aggregate"
5669 * path (whether actually aggregated or not) so the BAW is updated.
5670 *
5671 * Note! net80211 keeps self-assigning sequence numbers until
5672 * ampdu is negotiated. This means the initially-negotiated BAW left
5673 * edge won't match ni->ni_txseqs[tid].
5674 *
5675 * So, being very dirty, the BAW left edge is "slid" here to match
5676 * ni->ni_txseqs[tid].
5677 *
5678 * What likely SHOULD happen is that all packets subsequent to the
5679 * addba request should be tagged as aggregate and queued as non-aggregate
5680 * frames; thus updating the BAW. For now though, I'll just slide the
5681 * window.
5682 */
5683int
5684ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5685    int status, int code, int batimeout)
5686{
5687	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5688	int tid = tap->txa_tid;
5689	struct ath_node *an = ATH_NODE(ni);
5690	struct ath_tid *atid = &an->an_tid[tid];
5691	int r;
5692
5693	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5694	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
5695	    ni->ni_macaddr,
5696	    ":",
5697	    status, code, batimeout);
5698
5699	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5700	    "%s: txa_start=%d, ni_txseqs=%d\n",
5701	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5702
5703	/*
5704	 * Call this first, so the interface flags get updated
5705	 * before the TID is unpaused. Otherwise a race condition
5706	 * exists where the unpaused TID still doesn't yet have
5707	 * IEEE80211_AGGR_RUNNING set.
5708	 */
5709	r = sc->sc_addba_response(ni, tap, status, code, batimeout);
5710
5711	ATH_TX_LOCK(sc);
5712	atid->addba_tx_pending = 0;
5713	/*
5714	 * XXX dirty!
5715	 * Slide the BAW left edge to wherever net80211 left it for us.
5716	 * Read above for more information.
5717	 */
5718	tap->txa_start = ni->ni_txseqs[tid];
5719	ath_tx_tid_resume(sc, atid);
5720	ATH_TX_UNLOCK(sc);
5721	return r;
5722}
5723
5724
5725/*
5726 * Stop ADDBA on a queue.
5727 *
5728 * This can be called whilst BAR TX is currently active on the queue,
5729 * so make sure this is unblocked before continuing.
5730 */
5731void
5732ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5733{
5734	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5735	int tid = tap->txa_tid;
5736	struct ath_node *an = ATH_NODE(ni);
5737	struct ath_tid *atid = &an->an_tid[tid];
5738	ath_bufhead bf_cq;
5739	struct ath_buf *bf;
5740
5741	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
5742	    __func__,
5743	    ni->ni_macaddr,
5744	    ":");
5745
5746	/*
5747	 * Pause TID traffic early, so there aren't any races; then
5748	 * unblock any pending BAR-held traffic, if it's currently paused.
5749	 */
5750	ATH_TX_LOCK(sc);
5751	ath_tx_tid_pause(sc, atid);
5752	if (atid->bar_wait) {
5753		/*
5754		 * bar_unsuspend() expects bar_tx == 1, as it should be
5755		 * called from the TX completion path.  This quietens
5756		 * the warning.  It's cleared for us anyway.
5757		 */
5758		atid->bar_tx = 1;
5759		ath_tx_tid_bar_unsuspend(sc, atid);
5760	}
5761	ATH_TX_UNLOCK(sc);
5762
5763	/* There's no need to hold the TXQ lock here */
5764	sc->sc_addba_stop(ni, tap);
5765
5766	/*
5767	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
5768	 * it'll set the cleanup flag, and it'll be unpaused once
5769	 * things have been cleaned up.
5770	 */
5771	TAILQ_INIT(&bf_cq);
5772	ATH_TX_LOCK(sc);
5773	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
5774	/*
5775	 * Unpause the TID if no cleanup is required.
5776	 */
5777	if (! atid->cleanup_inprogress)
5778		ath_tx_tid_resume(sc, atid);
5779	ATH_TX_UNLOCK(sc);
5780
5781	/* Handle completing frames and fail them */
5782	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5783		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5784		ath_tx_default_comp(sc, bf, 1);
5785	}
5786
5787}
5788
5789/*
5790 * Handle a node reassociation.
5791 *
5792 * We may have a bunch of frames queued to the hardware; those need
5793 * to be marked as cleanup.
5794 */
5795void
5796ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
5797{
5798	struct ath_tid *tid;
5799	int i;
5800	ath_bufhead bf_cq;
5801	struct ath_buf *bf;
5802
5803	TAILQ_INIT(&bf_cq);
5804
5805	ATH_TX_UNLOCK_ASSERT(sc);
5806
5807	ATH_TX_LOCK(sc);
5808	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
5809		tid = &an->an_tid[i];
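		/* Nothing in flight for this TID?  Nothing to clean up. */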
5810		if (tid->hwq_depth == 0)
5811			continue;
5812		ath_tx_tid_pause(sc, tid);
5813		DPRINTF(sc, ATH_DEBUG_NODE,
5814		    "%s: %6D: TID %d: cleaning up TID\n",
5815		    __func__,
5816		    an->an_node.ni_macaddr,
5817		    ":",
5818		    i);
5819		ath_tx_tid_cleanup(sc, an, i, &bf_cq);
5820		/*
5821		 * Unpause the TID if no cleanup is required.
5822		 */
5823		if (! tid->cleanup_inprogress)
5824			ath_tx_tid_resume(sc, tid);
5825	}
5826	ATH_TX_UNLOCK(sc);
5827
5828	/* Handle completing frames and fail them */
5829	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5830		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5831		ath_tx_default_comp(sc, bf, 1);
5832	}
5833}
5834
5835/*
5836 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
5837 * it simply tears down the aggregation session. Ew.
5838 *
5839 * It however will call ieee80211_ampdu_stop() which will call
5840 * ic->ic_addba_stop().
5841 *
5842 * XXX This uses a hard-coded max BAR count value; the whole
5843 * XXX BAR TX success or failure should be better handled!
5844 */
5845void
5846ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5847    int status)
5848{
5849	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5850	int tid = tap->txa_tid;
5851	struct ath_node *an = ATH_NODE(ni);
5852	struct ath_tid *atid = &an->an_tid[tid];
5853	int attempts = tap->txa_attempts;
5854
5855	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
5856	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n",
5857	    __func__,
5858	    ni->ni_macaddr,
5859	    ":",
5860	    tap->txa_tid,
5861	    atid->tid,
5862	    status,
5863	    attempts);
5864
5865	/* Note: This may update the BAW details */
5866	sc->sc_bar_response(ni, tap, status);
5867
5868	/* Unpause the TID */
5869	/*
5870	 * XXX if this is attempt=50, the TID will be downgraded
5871	 * XXX to a non-aggregate session. So we must unpause the
5872	 * XXX TID here or it'll never be done.
5873	 *
5874	 * Also, don't call it if bar_tx/bar_wait are 0; something
5875	 * has beaten us to the punch? (XXX figure out what?)
5876	 */
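	/*
	 * (The magic 50 below is assumed to match the attempt count
	 * at which net80211 downgrades the session; see the
	 * hard-coded value XXX in the comment above this function.)
	 */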
5877	if (status == 0 || attempts == 50) {
5878		ATH_TX_LOCK(sc);
5879		if (atid->bar_tx == 0 || atid->bar_wait == 0)
5880			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
5881			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
5882			    __func__,
5883			    atid->bar_tx, atid->bar_wait);
5884		else
5885			ath_tx_tid_bar_unsuspend(sc, atid);
5886		ATH_TX_UNLOCK(sc);
5887	}
5888}
5889
5890/*
5891 * This is called whenever the pending ADDBA request times out.
5892 * Unpause and reschedule the TID.
5893 */
5894void
5895ath_addba_response_timeout(struct ieee80211_node *ni,
5896    struct ieee80211_tx_ampdu *tap)
5897{
5898	struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5899	int tid = tap->txa_tid;
5900	struct ath_node *an = ATH_NODE(ni);
5901	struct ath_tid *atid = &an->an_tid[tid];
5902
5903	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5904	    "%s: %6D: TID=%d, called; resuming\n",
5905	    __func__,
5906	    ni->ni_macaddr,
5907	    ":",
5908	    tid);
5909
5910	ATH_TX_LOCK(sc);
5911	atid->addba_tx_pending = 0;
5912	ATH_TX_UNLOCK(sc);
5913
5914	/* Note: This updates the aggregate state to (again) pending */
5915	sc->sc_addba_response_timeout(ni, tap);
5916
5917	/* Unpause the TID; which reschedules it */
5918	ATH_TX_LOCK(sc);
5919	ath_tx_tid_resume(sc, atid);
5920	ATH_TX_UNLOCK(sc);
5921}
5922
5923/*
5924 * Check if a node is asleep or not.
5925 */
5926int
5927ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
5928{
5929
5930	ATH_TX_LOCK_ASSERT(sc);
5931
5932	return (an->an_is_powersave);
5933}
5934
5935/*
5936 * Mark a node as currently "in power save."
5937 * This suspends all traffic on the node.
5938 *
5939 * This must be called with the node/tx locks free.
5940 *
5941 * XXX TODO: the locking silliness below is due to how the node
5942 * locking currently works.  Right now, the node lock is grabbed
5943 * to do rate control lookups and these are done with the TX
5944 * queue lock held.  This means the node lock can't be grabbed
5945 * first here or a LOR will occur.
5946 *
5947 * Eventually (hopefully!) the TX path code will only grab
5948 * the TXQ lock when transmitting and the ath_node lock when
5949 * doing node/TID operations.  There are other complications -
5950 * the sched/unsched operations involve walking the per-txq
5951 * 'active tid' list and this requires both locks to be held.
5952 */
5953void
5954ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
5955{
5956	struct ath_tid *atid;
5957	struct ath_txq *txq;
5958	int tid;
5959
5960	ATH_TX_UNLOCK_ASSERT(sc);
5961
5962	/* Suspend all traffic on the node */
5963	ATH_TX_LOCK(sc);
5964
5965	if (an->an_is_powersave) {
5966		DPRINTF(sc, ATH_DEBUG_XMIT,
5967		    "%s: %6D: node was already asleep!\n",
5968		    __func__, an->an_node.ni_macaddr, ":");
5969		ATH_TX_UNLOCK(sc);
5970		return;
5971	}
5972
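	/*
	 * Pause every TID on this node before flagging it asleep, so
	 * no further frames are scheduled while it's in power save.
	 */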
5973	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
5974		atid = &an->an_tid[tid];
5975		txq = sc->sc_ac2q[atid->ac];
5976
5977		ath_tx_tid_pause(sc, atid);
5978	}
5979
5980	/* Mark node as in powersaving */
5981	an->an_is_powersave = 1;
5982
5983	ATH_TX_UNLOCK(sc);
5984}
5985
5986/*
5987 * Mark a node as currently "awake."
5988 * This resumes all traffic to the node.
5989 */
5990void
5991ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
5992{
5993	struct ath_tid *atid;
5994	struct ath_txq *txq;
5995	int tid;
5996
5997	ATH_TX_UNLOCK_ASSERT(sc);
5998
5999	ATH_TX_LOCK(sc);
6000
6001	/* Already awake?  Shouldn't happen; log it and bail. */
6002	if (an->an_is_powersave == 0) {
6003		ATH_TX_UNLOCK(sc);
6004		DPRINTF(sc, ATH_DEBUG_XMIT,
6005		    "%s: an=%p: node was already awake\n",
6006		    __func__, an);
6007		return;
6008	}
6009
6010	/* Mark node as awake */
6011	an->an_is_powersave = 0;
6012	/*
6013	 * Clear any pending leaked frame requests
6014	 */
6015	an->an_leak_count = 0;
6016
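	/*
	 * Resume every TID so any traffic queued while the node was
	 * asleep can be (re)scheduled.
	 */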
6017	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6018		atid = &an->an_tid[tid];
6019		txq = sc->sc_ac2q[atid->ac];
6020
6021		ath_tx_tid_resume(sc, atid);
6022	}
6023	ATH_TX_UNLOCK(sc);
6024}
6025
6026static int
6027ath_legacy_dma_txsetup(struct ath_softc *sc)
6028{
6029
6030	/* nothing new needed */
6031	return (0);
6032}
6033
6034static int
6035ath_legacy_dma_txteardown(struct ath_softc *sc)
6036{
6037
6038	/* nothing new needed */
6039	return (0);
6040}
6041
6042void
6043ath_xmit_setup_legacy(struct ath_softc *sc)
6044{
6045	/*
6046	 * For now, just set the descriptor length to sizeof(ath_desc);
6047	 * worry about extracting the real length out of the HAL later.
6048	 */
6049	sc->sc_tx_desclen = sizeof(struct ath_desc);
6050	sc->sc_tx_statuslen = sizeof(struct ath_desc);
6051	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */
6052
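	/*
	 * Wire up the legacy (non-EDMA) TX method table.
	 */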
6053	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6054	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6055	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6056
6057	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6058	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6059
6060	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
6061}
6062